[automerger skipped] DO NOT MERGE - Merge pi-dev@5234907 into stage-aosp-master
am: 4ab0beecd8 -s ours
am skip reason: subject contains skip directive

Change-Id: I51bcd661e0a30fbdac517c7c95e37b202a19d8ad
diff --git a/METADATA b/METADATA
index cca324d..e187847 100644
--- a/METADATA
+++ b/METADATA
@@ -11,7 +11,8 @@
     type: GIT
     value: "https://github.com/dabeaz/ply.git"
   }
-  version: "3.10"
-  last_upgrade_date { year: 2017 month: 11 day: 15 }
+  version: "3.11"
+  last_upgrade_date { year: 2018 month: 05 day: 22 }
   license_type: PERMISSIVE
 }
+
diff --git a/ply/ANNOUNCE b/ply/ANNOUNCE
index c430051..3e58250 100644
--- a/ply/ANNOUNCE
+++ b/ply/ANNOUNCE
@@ -1,11 +1,11 @@
-January 31, 2017
+February 15, 2018
 
-                  Announcing :  PLY-3.10 (Python Lex-Yacc)
+                  Announcing :  PLY-3.11 (Python Lex-Yacc)
 
                         http://www.dabeaz.com/ply
 
-I'm pleased to announce PLY-3.10--a pure Python implementation of the
-common parsing tools lex and yacc.  PLY-3.10 is a minor bug fix
+I'm pleased to announce PLY-3.11--a pure Python implementation of the
+common parsing tools lex and yacc.  PLY-3.11 is a minor bug fix
 release.  It supports both Python 2 and Python 3.
 
 If you are new to PLY, here are a few highlights:
diff --git a/ply/CHANGES b/ply/CHANGES
index 815c231..4405007 100644
--- a/ply/CHANGES
+++ b/ply/CHANGES
@@ -1,3 +1,16 @@
+Version 3.11
+---------------------
+02/15/18  beazley
+          Fixed some minor bugs related to re flags and token order.  
+          Github pull requests #151 and #153.
+
+02/15/18  beazley
+          Added a set_lexpos() method to grammar symbols.  Github issue #148.
+
+
+04/13/17  beazley
+          Mostly minor bug fixes and small code cleanups.
+
 Version 3.10
 ---------------------
 01/31/17: beazley
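
A minimal, self-contained sketch (illustrative only, not part of the patch) of the new set_lexpos() method noted in the changelog above; the method itself is added to grammar-symbol slices in the yacc.py hunk later in this patch. The toy grammar here is an assumption for demonstration:

    import ply.lex as lex
    import ply.yacc as yacc

    tokens = ('NUMBER', 'PLUS')
    t_PLUS = r'\+'
    t_ignore = ' '

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    def p_expr_plus(p):
        'expr : expr PLUS NUMBER'
        p[0] = p[1] + p[3]
        # New in 3.11: overwrite the recorded lexing position of a symbol.
        # Here the result symbol inherits the position of its left operand.
        p.set_lexpos(0, p.lexpos(1))

    def p_expr_number(p):
        'expr : NUMBER'
        p[0] = p[1]

    def p_error(p):
        pass

    lex.lex()
    parser = yacc.yacc(write_tables=False, debug=False)
    print(parser.parse('1 + 2 + 3'))   # prints 6
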
diff --git a/ply/PKG-INFO b/ply/PKG-INFO
new file mode 100644
index 0000000..f2d8c8a
--- /dev/null
+++ b/ply/PKG-INFO
@@ -0,0 +1,23 @@
+Metadata-Version: 1.1
+Name: ply
+Version: 3.11
+Summary: Python Lex & Yacc
+Home-page: http://www.dabeaz.com/ply/
+Author: David Beazley
+Author-email: dave@dabeaz.com
+License: BSD
+Description-Content-Type: UNKNOWN
+Description: 
+        PLY is yet another implementation of lex and yacc for Python. Some notable
+        features include the fact that it's implemented entirely in Python and it
+        uses LALR(1) parsing which is efficient and well suited for larger grammars.
+        
+        PLY provides most of the standard lex/yacc features including support for empty 
+        productions, precedence rules, error recovery, and support for ambiguous grammars. 
+        
+        PLY is extremely easy to use and provides very extensive error checking. 
+        It is compatible with both Python 2 and Python 3.
+        
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 2
diff --git a/ply/README.md b/ply/README.md
index e428f1b..05df32a 100644
--- a/ply/README.md
+++ b/ply/README.md
@@ -1,6 +1,8 @@
-PLY (Python Lex-Yacc)                   Version 3.10
+# PLY (Python Lex-Yacc)                   Version 3.11
 
-Copyright (C) 2001-2017
+[![Build Status](https://travis-ci.org/dabeaz/ply.svg?branch=master)](https://travis-ci.org/dabeaz/ply)
+
+Copyright (C) 2001-2018
 David M. Beazley (Dabeaz LLC)
 All rights reserved.
 
diff --git a/ply/doc/internal.html b/ply/doc/internal.html
index f73bc43..57e87df 100644
--- a/ply/doc/internal.html
+++ b/ply/doc/internal.html
@@ -12,7 +12,7 @@
 </b>
 
 <p>
-<b>PLY Version: 3.10</b>
+<b>PLY Version: 3.11</b>
 <p>
 
 <!-- INDEX -->
diff --git a/ply/doc/ply.html b/ply/doc/ply.html
index 48ccdcb..b35ba44 100644
--- a/ply/doc/ply.html
+++ b/ply/doc/ply.html
@@ -12,13 +12,13 @@
 </b>
 
 <p>
-<b>PLY Version: 3.10</b>
+<b>PLY Version: 3.11</b>
 <p>
 
 <!-- INDEX -->
 <div class="sectiontoc">
 <ul>
-<li><a href="#ply_nn1">Preface and Requirements</a>
+<li><a href="#ply_nn0">Preface and Requirements</a>
 <li><a href="#ply_nn1">Introduction</a>
 <li><a href="#ply_nn2">PLY Overview</a>
 <li><a href="#ply_nn3">Lex</a>
@@ -34,7 +34,7 @@
 <li><a href="#ply_nn12">Error handling</a>
 <li><a href="#ply_nn14">EOF Handling</a>
 <li><a href="#ply_nn13">Building and using the lexer</a>
-<li><a href="#ply_nn14">The @TOKEN decorator</a>
+<li><a href="#ply_nn14b">The @TOKEN decorator</a>
 <li><a href="#ply_nn15">Optimized mode</a>
 <li><a href="#ply_nn16">Debugging</a>
 <li><a href="#ply_nn17">Alternative specification of lexers</a>
@@ -42,7 +42,7 @@
 <li><a href="#ply_nn19">Lexer cloning</a>
 <li><a href="#ply_nn20">Internal lexer state</a>
 <li><a href="#ply_nn21">Conditional lexing and start conditions</a>
-<li><a href="#ply_nn21">Miscellaneous Issues</a>
+<li><a href="#ply_nn21b">Miscellaneous Issues</a>
 </ul>
 <li><a href="#ply_nn22">Parsing basics</a>
 <li><a href="#ply_nn23">Yacc</a>
@@ -50,10 +50,10 @@
 <li><a href="#ply_nn24">An example</a>
 <li><a href="#ply_nn25">Combining Grammar Rule Functions</a>
 <li><a href="#ply_nn26">Character Literals</a>
-<li><a href="#ply_nn26">Empty Productions</a>
+<li><a href="#ply_nn26b">Empty Productions</a>
 <li><a href="#ply_nn28">Changing the starting symbol</a>
 <li><a href="#ply_nn27">Dealing With Ambiguous Grammars</a>
-<li><a href="#ply_nn28">The parser.out file</a>
+<li><a href="#ply_nn28b">The parser.out file</a>
 <li><a href="#ply_nn29">Syntax Error Handling</a>
 <ul>
 <li><a href="#ply_nn30">Recovery and resynchronization with error rules</a>
@@ -64,11 +64,11 @@
 </ul>
 <li><a href="#ply_nn33">Line Number and Position Tracking</a>
 <li><a href="#ply_nn34">AST Construction</a>
-<li><a href="#ply_nn35">Embedded Actions</a>
+<li><a href="#ply_nn35b">Embedded Actions</a>
 <li><a href="#ply_nn36">Miscellaneous Yacc Notes</a>
 </ul>
 <li><a href="#ply_nn37">Multiple Parsers and Lexers</a>
-<li><a href="#ply_nn38">Using Python's Optimized Mode</a>
+<li><a href="#ply_nn38b">Using Python's Optimized Mode</a>
 <li><a href="#ply_nn44">Advanced Debugging</a>
 <ul>
 <li><a href="#ply_nn45">Debugging the lex() and yacc() commands</a>
@@ -85,7 +85,7 @@
 
 
 
-<H2><a name="ply_nn1"></a>1. Preface and Requirements</H2>
+<H2><a name="ply_nn0"></a>1. Preface and Requirements</H2>
 
 
 <p>
@@ -558,15 +558,12 @@
 
 <blockquote>
 <pre>
-# Compute column. 
+# Compute column.
 #     input is the input text string
 #     token is a token instance
 def find_column(input, token):
-    last_cr = input.rfind('\n', 0, token.lexpos)
-    if last_cr < 0:
-	last_cr = 0
-    column = (token.lexpos - last_cr) + 1
-    return column
+    line_start = input.rfind('\n', 0, token.lexpos) + 1
+    return (token.lexpos - line_start) + 1
 </pre>
 </blockquote>
 
@@ -718,7 +715,7 @@
 None if the end of the input text has been reached.
 </ul>
 
-<H3><a name="ply_nn14"></a>4.12 The @TOKEN decorator</H3>
+<H3><a name="ply_nn14b"></a>4.12 The @TOKEN decorator</H3>
 
 
In some applications, you may want to build tokens from a series of
@@ -1418,7 +1415,7 @@
 position), stores it, and returns a token 'CCODE' containing all of that text.  When returning the token, the lexing state is restored back to its
 initial state.
 
-<H3><a name="ply_nn21"></a>4.20 Miscellaneous Issues</H3>
+<H3><a name="ply_nn21b"></a>4.20 Miscellaneous Issues</H3>
 
 
 <P>
@@ -1438,10 +1435,13 @@
 
 <blockquote>
 <pre>
-lex.lex(reflags=re.UNICODE)
+lex.lex(reflags=re.UNICODE | re.VERBOSE)
 </pre>
 </blockquote>
 
+Note: by default, <tt>reflags</tt> is set to <tt>re.VERBOSE</tt>.  If you provide
+your own flags, you may need to include this for PLY to preserve its normal behavior.
+
 <p>
 <li>Since the lexer is written entirely in Python, its performance is
 largely determined by that of the Python <tt>re</tt> module.  Although
@@ -1890,7 +1890,7 @@
 <b>Character literals are limited to a single character</b>.  Thus, it is not legal to specify literals such as <tt>'&lt;='</tt> or <tt>'=='</tt>.  For this, use
 the normal lexing rules (e.g., define a rule such as <tt>t_EQ = r'=='</tt>).
 
-<H3><a name="ply_nn26"></a>6.4 Empty Productions</H3>
+<H3><a name="ply_nn26b"></a>6.4 Empty Productions</H3>
 
 
 <tt>yacc.py</tt> can handle empty productions by defining a rule like this:
@@ -2208,7 +2208,7 @@
 <tt>parser.out</tt> debugging file with an appropriately high level of
 caffeination.
 
-<H3><a name="ply_nn28"></a>6.7 The parser.out file</H3>
+<H3><a name="ply_nn28b"></a>6.7 The parser.out file</H3>
 
 
 Tracking down shift/reduce and reduce/reduce conflicts is one of the finer pleasures of using an LR
@@ -2950,7 +2950,7 @@
 </pre>
 </blockquote>
 
-<H3><a name="ply_nn35"></a>6.11 Embedded Actions</H3>
+<H3><a name="ply_nn35b"></a>6.11 Embedded Actions</H3>
 
 
 The parsing technique used by yacc only allows actions to be executed at the end of a rule.  For example,
@@ -3270,7 +3270,7 @@
 For example, if you wanted to have different parsing modes, you could attach a mode
 attribute to the parser object and look at it later.
 
-<H2><a name="ply_nn38"></a>8. Using Python's Optimized Mode</H2>
+<H2><a name="ply_nn38b"></a>8. Using Python's Optimized Mode</H2>
 
 
 Because PLY uses information from doc-strings, parsing and lexing
diff --git a/ply/ply/__init__.py b/ply/ply/__init__.py
index 6e53cdd..23707c6 100644
--- a/ply/ply/__init__.py
+++ b/ply/ply/__init__.py
@@ -1,5 +1,5 @@
 # PLY package
 # Author: David Beazley (dave@dabeaz.com)
 
-__version__ = '3.9'
+__version__ = '3.11'
 __all__ = ['lex','yacc']
diff --git a/ply/ply/cpp.py b/ply/ply/cpp.py
index b6bfc69..2422916 100644
--- a/ply/ply/cpp.py
+++ b/ply/ply/cpp.py
@@ -5,7 +5,7 @@
 # Copyright (C) 2007
 # All rights reserved
 #
-# This module implements an ANSI-C style lexical preprocessor for PLY. 
+# This module implements an ANSI-C style lexical preprocessor for PLY.
 # -----------------------------------------------------------------------------
 from __future__ import generators
 
@@ -78,7 +78,7 @@
     # replace with '/n'
     t.type = 'CPP_WS'; t.value = '\n'
     return t
-    
+
 def t_error(t):
     t.type = t.value[0]
     t.value = t.value[0]
@@ -92,8 +92,8 @@
 
 # -----------------------------------------------------------------------------
 # trigraph()
-# 
-# Given an input string, this function replaces all trigraph sequences. 
+#
+# Given an input string, this function replaces all trigraph sequences.
 # The following mapping is used:
 #
 #     ??=    #
@@ -263,7 +263,7 @@
     # ----------------------------------------------------------------------
     # add_path()
     #
-    # Adds a search path to the preprocessor.  
+    # Adds a search path to the preprocessor.
     # ----------------------------------------------------------------------
 
     def add_path(self,path):
@@ -307,7 +307,7 @@
 
     # ----------------------------------------------------------------------
     # tokenstrip()
-    # 
+    #
     # Remove leading/trailing whitespace tokens from a token list
     # ----------------------------------------------------------------------
 
@@ -333,7 +333,7 @@
     # argument.  Each argument is represented by a list of tokens.
     #
     # When collecting arguments, leading and trailing whitespace is removed
-    # from each argument.  
+    # from each argument.
     #
     # This function properly handles nested parenthesis and commas---these do not
     # define new arguments.
@@ -345,7 +345,7 @@
         current_arg = []
         nesting = 1
         tokenlen = len(tokenlist)
-    
+
         # Search for the opening '('.
         i = 0
         while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
@@ -379,7 +379,7 @@
             else:
                 current_arg.append(t)
             i += 1
-    
+
         # Missing end argument
         self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
         return 0, [],[]
@@ -391,9 +391,9 @@
     # This is used to speed up macro expansion later on---we'll know
     # right away where to apply patches to the value to form the expansion
     # ----------------------------------------------------------------------
-    
+
     def macro_prescan(self,macro):
-        macro.patch     = []             # Standard macro arguments 
+        macro.patch     = []             # Standard macro arguments
         macro.str_patch = []             # String conversion expansion
         macro.var_comma_patch = []       # Variadic macro comma patch
         i = 0
@@ -411,10 +411,11 @@
                 elif (i > 0 and macro.value[i-1].value == '##'):
                     macro.patch.append(('c',argnum,i-1))
                     del macro.value[i-1]
+                    i -= 1
                     continue
                 elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
                     macro.patch.append(('c',argnum,i))
-                    i += 1
+                    del macro.value[i + 1]
                     continue
                 # Standard expansion
                 else:
@@ -440,7 +441,7 @@
         rep = [copy.copy(_x) for _x in macro.value]
 
         # Make string expansion patches.  These do not alter the length of the replacement sequence
-        
+
         str_expansion = {}
         for argnum, i in macro.str_patch:
             if argnum not in str_expansion:
@@ -458,7 +459,7 @@
         # Make all other patches.   The order of these matters.  It is assumed that the patch list
         # has been sorted in reverse order of patch location since replacements will cause the
         # size of the replacement sequence to expand from the patch point.
-        
+
         expanded = { }
         for ptype, argnum, i in macro.patch:
             # Concatenation.   Argument is left unexpanded
@@ -495,7 +496,7 @@
                 if t.value in self.macros and t.value not in expanded:
                     # Yes, we found a macro match
                     expanded[t.value] = True
-                    
+
                     m = self.macros[t.value]
                     if not m.arglist:
                         # A simple macro
@@ -509,7 +510,7 @@
                         j = i + 1
                         while j < len(tokens) and tokens[j].type in self.t_WS:
                             j += 1
-                        if tokens[j].value == '(':
+                        if j < len(tokens) and tokens[j].value == '(':
                             tokcount,args,positions = self.collect_args(tokens[j:])
                             if not m.variadic and len(args) !=  len(m.arglist):
                                 self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
@@ -527,7 +528,7 @@
                                     else:
                                         args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
                                         del args[len(m.arglist):]
-                                        
+
                                 # Get macro replacement text
                                 rep = self.macro_expand_args(m,args)
                                 rep = self.expand_macros(rep,expanded)
@@ -535,18 +536,24 @@
                                     r.lineno = t.lineno
                                 tokens[i:j+tokcount] = rep
                                 i += len(rep)
+                        else:
+                            # Not a macro invocation, just a word that
+                            # happens to match the macro's name. Move on
+                            # to the next token.
+                            i += 1
+
                     del expanded[t.value]
                     continue
                 elif t.value == '__LINE__':
                     t.type = self.t_INTEGER
                     t.value = self.t_INTEGER_TYPE(t.lineno)
-                
+
             i += 1
         return tokens
 
-    # ----------------------------------------------------------------------    
+    # ----------------------------------------------------------------------
     # evalexpr()
-    # 
+    #
     # Evaluate an expression token sequence for the purposes of evaluating
     # integral expressions.
     # ----------------------------------------------------------------------
@@ -593,7 +600,7 @@
                 tokens[i].value = str(tokens[i].value)
                 while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
                     tokens[i].value = tokens[i].value[:-1]
-        
+
         expr = "".join([str(x.value) for x in tokens])
         expr = expr.replace("&&"," and ")
         expr = expr.replace("||"," or ")
@@ -618,7 +625,7 @@
 
         if not source:
             source = ""
-            
+
         self.define("__FILE__ \"%s\"" % source)
 
         self.source = source
@@ -637,7 +644,7 @@
                 for tok in x:
                     if tok.type in self.t_WS and '\n' in tok.value:
                         chunk.append(tok)
-                
+
                 dirtokens = self.tokenstrip(x[i+1:])
                 if dirtokens:
                     name = dirtokens[0].value
@@ -645,7 +652,7 @@
                 else:
                     name = ""
                     args = []
-                
+
                 if name == 'define':
                     if enable:
                         for tok in self.expand_macros(chunk):
@@ -705,7 +712,7 @@
                                     iftrigger = True
                     else:
                         self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
-                        
+
                 elif name == 'else':
                     if ifstack:
                         if ifstack[-1][0]:
@@ -875,7 +882,7 @@
     def parse(self,input,source=None,ignore={}):
         self.ignore = ignore
         self.parser = self.parsegen(input,source)
-        
+
     # ----------------------------------------------------------------------
     # token()
     #
@@ -905,14 +912,3 @@
         tok = p.token()
         if not tok: break
         print(p.source, tok)
-
-
-
-
-    
-
-
-
-
-
-
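
For context on the macro_prescan() and expand_macros() fixes above, here is a small usage sketch patterned on the new test/testcpp.py added later in this patch; the d(q,s) token-pasting case is taken from that test:

    from ply.lex import lex
    from ply.cpp import *   # brings the preprocessor's token rules into scope

    # lex() picks up the t_* rules imported above, as test/testcpp.py does.
    p = Preprocessor(lex())
    p.parse("#define d(x,y) _##x##y##_\nd(q,s)\n")
    print("".join(tok.value for tok in p.parser))   # output contains '_qs_'
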
diff --git a/ply/ply/ctokens.py b/ply/ply/ctokens.py
index f6f6952..b265e59 100644
--- a/ply/ply/ctokens.py
+++ b/ply/ply/ctokens.py
@@ -16,7 +16,7 @@
     'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
     'LOR', 'LAND', 'LNOT',
     'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
-    
+
     # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
     'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
     'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
@@ -29,7 +29,7 @@
 
     # Ternary operator (?)
     'TERNARY',
-    
+
     # Delimeters ( ) [ ] { } , . ; :
     'LPAREN', 'RPAREN',
     'LBRACKET', 'RBRACKET',
@@ -39,7 +39,7 @@
     # Ellipsis (...)
     'ELLIPSIS',
 ]
-    
+
 # Operators
 t_PLUS             = r'\+'
 t_MINUS            = r'-'
@@ -125,9 +125,3 @@
     r'//.*\n'
     t.lexer.lineno += 1
     return t
-
-
-    
-
-
-
diff --git a/ply/ply/lex.py b/ply/ply/lex.py
index 3e240d1..f95bcdb 100644
--- a/ply/ply/lex.py
+++ b/ply/ply/lex.py
@@ -1,7 +1,7 @@
 # -----------------------------------------------------------------------------
 # ply: lex.py
 #
-# Copyright (C) 2001-2017
+# Copyright (C) 2001-2018
 # David M. Beazley (Dabeaz LLC)
 # All rights reserved.
 #
@@ -31,7 +31,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 # -----------------------------------------------------------------------------
 
-__version__    = '3.10'
+__version__    = '3.11'
 __tabversion__ = '3.10'
 
 import re
@@ -179,12 +179,12 @@
         with open(filename, 'w') as tf:
             tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
             tf.write('_tabversion   = %s\n' % repr(__tabversion__))
-            tf.write('_lextokens    = set(%s)\n' % repr(tuple(self.lextokens)))
-            tf.write('_lexreflags   = %s\n' % repr(self.lexreflags))
+            tf.write('_lextokens    = set(%s)\n' % repr(tuple(sorted(self.lextokens))))
+            tf.write('_lexreflags   = %s\n' % repr(int(self.lexreflags)))
             tf.write('_lexliterals  = %s\n' % repr(self.lexliterals))
             tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
 
-            # Rewrite the lexstatere table, replacing function objects with function names 
+            # Rewrite the lexstatere table, replacing function objects with function names
             tabre = {}
             for statename, lre in self.lexstatere.items():
                 titem = []
@@ -531,12 +531,11 @@
 # calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
 # -----------------------------------------------------------------------------
 def _statetoken(s, names):
-    nonstate = 1
     parts = s.split('_')
     for i, part in enumerate(parts[1:], 1):
         if part not in names and part != 'ANY':
             break
-    
+
     if i > 1:
         states = tuple(parts[1:i])
     else:
@@ -949,8 +948,6 @@
 
         # Add rules defined by functions first
         for fname, f in linfo.funcsym[state]:
-            line = f.__code__.co_firstlineno
-            file = f.__code__.co_filename
             regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
             if debug:
                 debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
@@ -1041,6 +1038,8 @@
             outputdir = os.path.dirname(srcfile)
         try:
             lexobj.writetab(lextab, outputdir)
+            if lextab in sys.modules:
+                del sys.modules[lextab]
         except IOError as e:
             errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
 
@@ -1097,4 +1096,3 @@
 
 # Alternative spelling of the TOKEN decorator
 Token = TOKEN
-
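
The writetab() change above stores repr(int(self.lexreflags)) because on Python 3.6+ combined re flags are a RegexFlag enum whose repr() may not round-trip when the generated lextab module is re-imported; coercing to int always writes a plain integer literal. A quick illustrative check (not part of the patch):

    import re

    flags = re.UNICODE | re.VERBOSE
    print(repr(flags))        # enum-style repr; exact form varies by Python version
    print(repr(int(flags)))   # 96 -- a literal that reloads unambiguously
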
diff --git a/ply/ply/yacc.py b/ply/ply/yacc.py
index 03bd86e..88188a1 100644
--- a/ply/ply/yacc.py
+++ b/ply/ply/yacc.py
@@ -1,7 +1,7 @@
 # -----------------------------------------------------------------------------
 # ply: yacc.py
 #
-# Copyright (C) 2001-2017
+# Copyright (C) 2001-2018
 # David M. Beazley (Dabeaz LLC)
 # All rights reserved.
 #
@@ -32,7 +32,7 @@
 # -----------------------------------------------------------------------------
 #
 # This implements an LR parser that is constructed from grammar rules defined
-# as Python functions. The grammer is specified by supplying the BNF inside
+# as Python functions. The grammar is specified by supplying the BNF inside
 # Python documentation strings.  The inspiration for this technique was borrowed
 # from John Aycock's Spark parsing system.  PLY might be viewed as cross between
 # Spark and the GNU bison utility.
@@ -64,10 +64,9 @@
 import sys
 import os.path
 import inspect
-import base64
 import warnings
 
-__version__    = '3.10'
+__version__    = '3.11'
 __tabversion__ = '3.10'
 
 #-----------------------------------------------------------------------------
@@ -268,6 +267,9 @@
     def lexpos(self, n):
         return getattr(self.slice[n], 'lexpos', 0)
 
+    def set_lexpos(self, n, lexpos):
+        self.slice[n].lexpos = lexpos
+
     def lexspan(self, n):
         startpos = getattr(self.slice[n], 'lexpos', 0)
         endpos = getattr(self.slice[n], 'endlexpos', startpos)
@@ -1360,7 +1362,7 @@
         p = LRItem(self, n)
         # Precompute the list of productions immediately following.
         try:
-            p.lr_after = Prodnames[p.prod[n+1]]
+            p.lr_after = self.Prodnames[p.prod[n+1]]
         except (IndexError, KeyError):
             p.lr_after = []
         try:
@@ -2301,7 +2303,6 @@
     # -----------------------------------------------------------------------------
 
     def dr_relation(self, C, trans, nullable):
-        dr_set = {}
         state, N = trans
         terms = []
 
@@ -2735,6 +2736,7 @@
             f.write('''
 # %s
 # This file is automatically generated. Do not edit.
+# pylint: disable=W,C,R
 _tabversion = %r
 
 _lr_method = %r
@@ -3072,7 +3074,7 @@
             self.error = True
             return
 
-        self.tokens = tokens
+        self.tokens = sorted(tokens)
 
     # Validate the tokens
     def validate_tokens(self):
@@ -3232,9 +3234,13 @@
     if module:
         _items = [(k, getattr(module, k)) for k in dir(module)]
         pdict = dict(_items)
-        # If no __file__ attribute is available, try to obtain it from the __module__ instead
+        # If no __file__ or __package__ attributes are available, try to obtain them
+        # from the __module__ instead
         if '__file__' not in pdict:
             pdict['__file__'] = sys.modules[pdict['__module__']].__file__
+        if '__package__' not in pdict and '__module__' in pdict:
+            if hasattr(sys.modules[pdict['__module__']], '__package__'):
+                pdict['__package__'] = sys.modules[pdict['__module__']].__package__
     else:
         pdict = get_caller_module_dict(2)
 
@@ -3476,6 +3482,8 @@
     if write_tables:
         try:
             lr.write_table(tabmodule, outputdir, signature)
+            if tabmodule in sys.modules:
+                del sys.modules[tabmodule]
         except IOError as e:
             errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
 
diff --git a/ply/ply/ygen.py b/ply/ply/ygen.py
index acf5ca1..03b9318 100644
--- a/ply/ply/ygen.py
+++ b/ply/ply/ygen.py
@@ -3,7 +3,7 @@
 # This is a support program that auto-generates different versions of the YACC parsing
 # function with different features removed for the purposes of performance.
 #
-# Users should edit the method LParser.parsedebug() in yacc.py.   The source code 
+# Users should edit the method LRParser.parsedebug() in yacc.py.   The source code
 # for that method is then used to create the other methods.   See the comments in
 # yacc.py for further details.
 
@@ -67,8 +67,3 @@
 
 if __name__ == '__main__':
     main()
-
-
-
-
-
diff --git a/ply/setup.cfg b/ply/setup.cfg
index 1eee7db..819449e 100644
--- a/ply/setup.cfg
+++ b/ply/setup.cfg
@@ -3,3 +3,8 @@
 
 [metadata]
 description-file = README.md
+
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/ply/setup.py b/ply/setup.py
index ee8ccd0..46bc6b3 100644
--- a/ply/setup.py
+++ b/ply/setup.py
@@ -17,7 +17,7 @@
 It is compatible with both Python 2 and Python 3.
 """,
             license="""BSD""",
-            version = "3.10",
+            version = "3.11",
             author = "David Beazley",
             author_email = "dave@dabeaz.com",
             maintainer = "David Beazley",
diff --git a/ply/test/README b/ply/test/README
index 52f032a..03b167c 100644
--- a/ply/test/README
+++ b/ply/test/README
@@ -3,5 +3,6 @@
 
   $ python testlex.py 
   $ python testyacc.py 
+  $ python testcpp.py
 
 The script 'cleanup.sh' cleans up this directory to its original state.
diff --git a/ply/test/lex_optimize4.py b/ply/test/lex_optimize4.py
new file mode 100644
index 0000000..cc6e2a9
--- /dev/null
+++ b/ply/test/lex_optimize4.py
@@ -0,0 +1,26 @@
+# -----------------------------------------------------------------------------
+# lex_optimize4.py
+# -----------------------------------------------------------------------------
+import re
+import sys
+
+if ".." not in sys.path: sys.path.insert(0,"..")
+import ply.lex as lex
+
+tokens = [
+    "PLUS",
+    "MINUS",
+    "NUMBER",
+    ]
+
+t_PLUS = r'\+?'
+t_MINUS = r'-'
+t_NUMBER = r'(\d+)'
+
+def t_error(t):
+    pass
+
+
+# Build the lexer
+lex.lex(optimize=True, lextab="opt4tab", reflags=re.UNICODE)
+lex.runmain(data="3+4")
diff --git a/ply/test/pkg_test1/parsing/lextab.py b/ply/test/pkg_test1/parsing/lextab.py
new file mode 100644
index 0000000..52376b2
--- /dev/null
+++ b/ply/test/pkg_test1/parsing/lextab.py
@@ -0,0 +1,10 @@
+# lextab.py. This file automatically created by PLY (version 3.11). Don't edit!
+_tabversion   = '3.10'
+_lextokens    = set(('DIVIDE', 'EQUALS', 'LPAREN', 'MINUS', 'NAME', 'NUMBER', 'PLUS', 'RPAREN', 'TIMES'))
+_lexreflags   = 64
+_lexliterals  = ''
+_lexstateinfo = {'INITIAL': 'inclusive'}
+_lexstatere   = {'INITIAL': [('(?P<t_NUMBER>\\d+)|(?P<t_newline>\\n+)|(?P<t_NAME>[a-zA-Z_][a-zA-Z0-9_]*)|(?P<t_PLUS>\\+)|(?P<t_LPAREN>\\()|(?P<t_TIMES>\\*)|(?P<t_RPAREN>\\))|(?P<t_EQUALS>=)|(?P<t_DIVIDE>/)|(?P<t_MINUS>-)', [None, ('t_NUMBER', 'NUMBER'), ('t_newline', 'newline'), (None, 'NAME'), (None, 'PLUS'), (None, 'LPAREN'), (None, 'TIMES'), (None, 'RPAREN'), (None, 'EQUALS'), (None, 'DIVIDE'), (None, 'MINUS')])]}
+_lexstateignore = {'INITIAL': ' \t'}
+_lexstateerrorf = {'INITIAL': 't_error'}
+_lexstateeoff = {}
diff --git a/ply/test/pkg_test1/parsing/parsetab.py b/ply/test/pkg_test1/parsing/parsetab.py
new file mode 100644
index 0000000..ba645aa
--- /dev/null
+++ b/ply/test/pkg_test1/parsing/parsetab.py
@@ -0,0 +1,40 @@
+
+# parsetab.py
+# This file is automatically generated. Do not edit.
+# pylint: disable=W,C,R
+_tabversion = '3.10'
+
+_lr_method = 'LALR'
+
+_lr_signature = 'leftPLUSMINUSleftTIMESDIVIDErightUMINUSDIVIDE EQUALS LPAREN MINUS NAME NUMBER PLUS RPAREN TIMESstatement : NAME EQUALS expressionstatement : expressionexpression : expression PLUS expression\n                  | expression MINUS expression\n                  | expression TIMES expression\n                  | expression DIVIDE expressionexpression : MINUS expression %prec UMINUSexpression : LPAREN expression RPARENexpression : NUMBERexpression : NAME'
+    
+_lr_action_items = {'PLUS':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,11,-10,-7,11,-8,11,-3,-4,-6,-5,]),'MINUS':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,],[1,1,-9,1,-10,12,-10,-7,12,1,1,1,1,1,-8,12,-3,-4,-6,-5,]),'EQUALS':([4,],[10,]),'NUMBER':([0,1,3,10,11,12,13,14,],[2,2,2,2,2,2,2,2,]),'LPAREN':([0,1,3,10,11,12,13,14,],[3,3,3,3,3,3,3,3,]),'NAME':([0,1,3,10,11,12,13,14,],[4,7,7,7,7,7,7,7,]),'TIMES':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,14,-10,-7,14,-8,14,14,14,-6,-5,]),'$end':([2,4,5,6,7,8,15,16,17,18,19,20,],[-9,-10,0,-2,-10,-7,-8,-1,-3,-4,-6,-5,]),'RPAREN':([2,7,8,9,15,17,18,19,20,],[-9,-10,-7,15,-8,-3,-4,-6,-5,]),'DIVIDE':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,13,-10,-7,13,-8,13,13,13,-6,-5,]),}
+
+_lr_action = {}
+for _k, _v in _lr_action_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+      if not _x in _lr_action:  _lr_action[_x] = {}
+      _lr_action[_x][_k] = _y
+del _lr_action_items
+
+_lr_goto_items = {'statement':([0,],[5,]),'expression':([0,1,3,10,11,12,13,14,],[6,8,9,16,17,18,19,20,]),}
+
+_lr_goto = {}
+for _k, _v in _lr_goto_items.items():
+   for _x, _y in zip(_v[0], _v[1]):
+       if not _x in _lr_goto: _lr_goto[_x] = {}
+       _lr_goto[_x][_k] = _y
+del _lr_goto_items
+_lr_productions = [
+  ("S' -> statement","S'",1,None,None,None),
+  ('statement -> NAME EQUALS expression','statement',3,'p_statement_assign','calcparse.py',21),
+  ('statement -> expression','statement',1,'p_statement_expr','calcparse.py',25),
+  ('expression -> expression PLUS expression','expression',3,'p_expression_binop','calcparse.py',29),
+  ('expression -> expression MINUS expression','expression',3,'p_expression_binop','calcparse.py',30),
+  ('expression -> expression TIMES expression','expression',3,'p_expression_binop','calcparse.py',31),
+  ('expression -> expression DIVIDE expression','expression',3,'p_expression_binop','calcparse.py',32),
+  ('expression -> MINUS expression','expression',2,'p_expression_uminus','calcparse.py',39),
+  ('expression -> LPAREN expression RPAREN','expression',3,'p_expression_group','calcparse.py',43),
+  ('expression -> NUMBER','expression',1,'p_expression_number','calcparse.py',47),
+  ('expression -> NAME','expression',1,'p_expression_name','calcparse.py',51),
+]
diff --git a/ply/test/pkg_test2/parsing/calclextab.py b/ply/test/pkg_test2/parsing/calclextab.py
new file mode 100644
index 0000000..a616c39
--- /dev/null
+++ b/ply/test/pkg_test2/parsing/calclextab.py
@@ -0,0 +1,10 @@
+# calclextab.py. This file automatically created by PLY (version 3.11). Don't edit!
+_tabversion   = '3.10'
+_lextokens    = set(('DIVIDE', 'EQUALS', 'LPAREN', 'MINUS', 'NAME', 'NUMBER', 'PLUS', 'RPAREN', 'TIMES'))
+_lexreflags   = 64
+_lexliterals  = ''
+_lexstateinfo = {'INITIAL': 'inclusive'}
+_lexstatere   = {'INITIAL': [('(?P<t_NUMBER>\\d+)|(?P<t_newline>\\n+)|(?P<t_NAME>[a-zA-Z_][a-zA-Z0-9_]*)|(?P<t_PLUS>\\+)|(?P<t_LPAREN>\\()|(?P<t_TIMES>\\*)|(?P<t_RPAREN>\\))|(?P<t_EQUALS>=)|(?P<t_DIVIDE>/)|(?P<t_MINUS>-)', [None, ('t_NUMBER', 'NUMBER'), ('t_newline', 'newline'), (None, 'NAME'), (None, 'PLUS'), (None, 'LPAREN'), (None, 'TIMES'), (None, 'RPAREN'), (None, 'EQUALS'), (None, 'DIVIDE'), (None, 'MINUS')])]}
+_lexstateignore = {'INITIAL': ' \t'}
+_lexstateerrorf = {'INITIAL': 't_error'}
+_lexstateeoff = {}
diff --git a/ply/test/pkg_test2/parsing/calcparsetab.py b/ply/test/pkg_test2/parsing/calcparsetab.py
new file mode 100644
index 0000000..23e3208
--- /dev/null
+++ b/ply/test/pkg_test2/parsing/calcparsetab.py
@@ -0,0 +1,40 @@
+
+# calcparsetab.py
+# This file is automatically generated. Do not edit.
+# pylint: disable=W,C,R
+_tabversion = '3.10'
+
+_lr_method = 'LALR'
+
+_lr_signature = 'leftPLUSMINUSleftTIMESDIVIDErightUMINUSDIVIDE EQUALS LPAREN MINUS NAME NUMBER PLUS RPAREN TIMESstatement : NAME EQUALS expressionstatement : expressionexpression : expression PLUS expression\n                  | expression MINUS expression\n                  | expression TIMES expression\n                  | expression DIVIDE expressionexpression : MINUS expression %prec UMINUSexpression : LPAREN expression RPARENexpression : NUMBERexpression : NAME'
+    
+_lr_action_items = {'PLUS':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,11,-10,-7,11,-8,11,-3,-4,-6,-5,]),'MINUS':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,],[1,1,-9,1,-10,12,-10,-7,12,1,1,1,1,1,-8,12,-3,-4,-6,-5,]),'EQUALS':([4,],[10,]),'NUMBER':([0,1,3,10,11,12,13,14,],[2,2,2,2,2,2,2,2,]),'LPAREN':([0,1,3,10,11,12,13,14,],[3,3,3,3,3,3,3,3,]),'NAME':([0,1,3,10,11,12,13,14,],[4,7,7,7,7,7,7,7,]),'TIMES':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,14,-10,-7,14,-8,14,14,14,-6,-5,]),'$end':([2,4,5,6,7,8,15,16,17,18,19,20,],[-9,-10,0,-2,-10,-7,-8,-1,-3,-4,-6,-5,]),'RPAREN':([2,7,8,9,15,17,18,19,20,],[-9,-10,-7,15,-8,-3,-4,-6,-5,]),'DIVIDE':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,13,-10,-7,13,-8,13,13,13,-6,-5,]),}
+
+_lr_action = {}
+for _k, _v in _lr_action_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+      if not _x in _lr_action:  _lr_action[_x] = {}
+      _lr_action[_x][_k] = _y
+del _lr_action_items
+
+_lr_goto_items = {'statement':([0,],[5,]),'expression':([0,1,3,10,11,12,13,14,],[6,8,9,16,17,18,19,20,]),}
+
+_lr_goto = {}
+for _k, _v in _lr_goto_items.items():
+   for _x, _y in zip(_v[0], _v[1]):
+       if not _x in _lr_goto: _lr_goto[_x] = {}
+       _lr_goto[_x][_k] = _y
+del _lr_goto_items
+_lr_productions = [
+  ("S' -> statement","S'",1,None,None,None),
+  ('statement -> NAME EQUALS expression','statement',3,'p_statement_assign','calcparse.py',21),
+  ('statement -> expression','statement',1,'p_statement_expr','calcparse.py',25),
+  ('expression -> expression PLUS expression','expression',3,'p_expression_binop','calcparse.py',29),
+  ('expression -> expression MINUS expression','expression',3,'p_expression_binop','calcparse.py',30),
+  ('expression -> expression TIMES expression','expression',3,'p_expression_binop','calcparse.py',31),
+  ('expression -> expression DIVIDE expression','expression',3,'p_expression_binop','calcparse.py',32),
+  ('expression -> MINUS expression','expression',2,'p_expression_uminus','calcparse.py',39),
+  ('expression -> LPAREN expression RPAREN','expression',3,'p_expression_group','calcparse.py',43),
+  ('expression -> NUMBER','expression',1,'p_expression_number','calcparse.py',47),
+  ('expression -> NAME','expression',1,'p_expression_name','calcparse.py',51),
+]
diff --git a/ply/test/pkg_test3/generated/lextab.py b/ply/test/pkg_test3/generated/lextab.py
new file mode 100644
index 0000000..52376b2
--- /dev/null
+++ b/ply/test/pkg_test3/generated/lextab.py
@@ -0,0 +1,10 @@
+# lextab.py. This file automatically created by PLY (version 3.11). Don't edit!
+_tabversion   = '3.10'
+_lextokens    = set(('DIVIDE', 'EQUALS', 'LPAREN', 'MINUS', 'NAME', 'NUMBER', 'PLUS', 'RPAREN', 'TIMES'))
+_lexreflags   = 64
+_lexliterals  = ''
+_lexstateinfo = {'INITIAL': 'inclusive'}
+_lexstatere   = {'INITIAL': [('(?P<t_NUMBER>\\d+)|(?P<t_newline>\\n+)|(?P<t_NAME>[a-zA-Z_][a-zA-Z0-9_]*)|(?P<t_PLUS>\\+)|(?P<t_LPAREN>\\()|(?P<t_TIMES>\\*)|(?P<t_RPAREN>\\))|(?P<t_EQUALS>=)|(?P<t_DIVIDE>/)|(?P<t_MINUS>-)', [None, ('t_NUMBER', 'NUMBER'), ('t_newline', 'newline'), (None, 'NAME'), (None, 'PLUS'), (None, 'LPAREN'), (None, 'TIMES'), (None, 'RPAREN'), (None, 'EQUALS'), (None, 'DIVIDE'), (None, 'MINUS')])]}
+_lexstateignore = {'INITIAL': ' \t'}
+_lexstateerrorf = {'INITIAL': 't_error'}
+_lexstateeoff = {}
diff --git a/ply/test/pkg_test3/generated/parsetab.py b/ply/test/pkg_test3/generated/parsetab.py
new file mode 100644
index 0000000..ba645aa
--- /dev/null
+++ b/ply/test/pkg_test3/generated/parsetab.py
@@ -0,0 +1,40 @@
+
+# parsetab.py
+# This file is automatically generated. Do not edit.
+# pylint: disable=W,C,R
+_tabversion = '3.10'
+
+_lr_method = 'LALR'
+
+_lr_signature = 'leftPLUSMINUSleftTIMESDIVIDErightUMINUSDIVIDE EQUALS LPAREN MINUS NAME NUMBER PLUS RPAREN TIMESstatement : NAME EQUALS expressionstatement : expressionexpression : expression PLUS expression\n                  | expression MINUS expression\n                  | expression TIMES expression\n                  | expression DIVIDE expressionexpression : MINUS expression %prec UMINUSexpression : LPAREN expression RPARENexpression : NUMBERexpression : NAME'
+    
+_lr_action_items = {'PLUS':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,11,-10,-7,11,-8,11,-3,-4,-6,-5,]),'MINUS':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,],[1,1,-9,1,-10,12,-10,-7,12,1,1,1,1,1,-8,12,-3,-4,-6,-5,]),'EQUALS':([4,],[10,]),'NUMBER':([0,1,3,10,11,12,13,14,],[2,2,2,2,2,2,2,2,]),'LPAREN':([0,1,3,10,11,12,13,14,],[3,3,3,3,3,3,3,3,]),'NAME':([0,1,3,10,11,12,13,14,],[4,7,7,7,7,7,7,7,]),'TIMES':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,14,-10,-7,14,-8,14,14,14,-6,-5,]),'$end':([2,4,5,6,7,8,15,16,17,18,19,20,],[-9,-10,0,-2,-10,-7,-8,-1,-3,-4,-6,-5,]),'RPAREN':([2,7,8,9,15,17,18,19,20,],[-9,-10,-7,15,-8,-3,-4,-6,-5,]),'DIVIDE':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,13,-10,-7,13,-8,13,13,13,-6,-5,]),}
+
+_lr_action = {}
+for _k, _v in _lr_action_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+      if not _x in _lr_action:  _lr_action[_x] = {}
+      _lr_action[_x][_k] = _y
+del _lr_action_items
+
+_lr_goto_items = {'statement':([0,],[5,]),'expression':([0,1,3,10,11,12,13,14,],[6,8,9,16,17,18,19,20,]),}
+
+_lr_goto = {}
+for _k, _v in _lr_goto_items.items():
+   for _x, _y in zip(_v[0], _v[1]):
+       if not _x in _lr_goto: _lr_goto[_x] = {}
+       _lr_goto[_x][_k] = _y
+del _lr_goto_items
+_lr_productions = [
+  ("S' -> statement","S'",1,None,None,None),
+  ('statement -> NAME EQUALS expression','statement',3,'p_statement_assign','calcparse.py',21),
+  ('statement -> expression','statement',1,'p_statement_expr','calcparse.py',25),
+  ('expression -> expression PLUS expression','expression',3,'p_expression_binop','calcparse.py',29),
+  ('expression -> expression MINUS expression','expression',3,'p_expression_binop','calcparse.py',30),
+  ('expression -> expression TIMES expression','expression',3,'p_expression_binop','calcparse.py',31),
+  ('expression -> expression DIVIDE expression','expression',3,'p_expression_binop','calcparse.py',32),
+  ('expression -> MINUS expression','expression',2,'p_expression_uminus','calcparse.py',39),
+  ('expression -> LPAREN expression RPAREN','expression',3,'p_expression_group','calcparse.py',43),
+  ('expression -> NUMBER','expression',1,'p_expression_number','calcparse.py',47),
+  ('expression -> NAME','expression',1,'p_expression_name','calcparse.py',51),
+]
diff --git a/ply/test/pkg_test5/parsing/lextab.py b/ply/test/pkg_test5/parsing/lextab.py
new file mode 100644
index 0000000..8cab298
--- /dev/null
+++ b/ply/test/pkg_test5/parsing/lextab.py
@@ -0,0 +1,10 @@
+# lextab.py. This file automatically created by PLY (version 3.11). Don't edit!
+_tabversion   = '3.10'
+_lextokens    = set(('DIVIDE', 'EQUALS', 'LPAREN', 'MINUS', 'NAME', 'NUMBER', 'PLUS', 'RPAREN', 'TIMES'))
+_lexreflags   = 64
+_lexliterals  = ''
+_lexstateinfo = {'INITIAL': 'inclusive'}
+_lexstatere   = {'INITIAL': [('(?P<t_NUMBER>\\d+)|(?P<t_newline>\\n+)|(?P<t_NAME>[a-zA-Z_][a-zA-Z0-9_]*)|(?P<t_LPAREN>\\()|(?P<t_PLUS>\\+)|(?P<t_TIMES>\\*)|(?P<t_RPAREN>\\))|(?P<t_EQUALS>=)|(?P<t_DIVIDE>/)|(?P<t_MINUS>-)', [None, ('t_NUMBER', 'NUMBER'), ('t_newline', 'newline'), (None, 'NAME'), (None, 'LPAREN'), (None, 'PLUS'), (None, 'TIMES'), (None, 'RPAREN'), (None, 'EQUALS'), (None, 'DIVIDE'), (None, 'MINUS')])]}
+_lexstateignore = {'INITIAL': ' \t'}
+_lexstateerrorf = {'INITIAL': 't_error'}
+_lexstateeoff = {}
diff --git a/ply/test/pkg_test5/parsing/parsetab.py b/ply/test/pkg_test5/parsing/parsetab.py
new file mode 100644
index 0000000..ba645aa
--- /dev/null
+++ b/ply/test/pkg_test5/parsing/parsetab.py
@@ -0,0 +1,40 @@
+
+# parsetab.py
+# This file is automatically generated. Do not edit.
+# pylint: disable=W,C,R
+_tabversion = '3.10'
+
+_lr_method = 'LALR'
+
+_lr_signature = 'leftPLUSMINUSleftTIMESDIVIDErightUMINUSDIVIDE EQUALS LPAREN MINUS NAME NUMBER PLUS RPAREN TIMESstatement : NAME EQUALS expressionstatement : expressionexpression : expression PLUS expression\n                  | expression MINUS expression\n                  | expression TIMES expression\n                  | expression DIVIDE expressionexpression : MINUS expression %prec UMINUSexpression : LPAREN expression RPARENexpression : NUMBERexpression : NAME'
+    
+_lr_action_items = {'PLUS':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,11,-10,-7,11,-8,11,-3,-4,-6,-5,]),'MINUS':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,],[1,1,-9,1,-10,12,-10,-7,12,1,1,1,1,1,-8,12,-3,-4,-6,-5,]),'EQUALS':([4,],[10,]),'NUMBER':([0,1,3,10,11,12,13,14,],[2,2,2,2,2,2,2,2,]),'LPAREN':([0,1,3,10,11,12,13,14,],[3,3,3,3,3,3,3,3,]),'NAME':([0,1,3,10,11,12,13,14,],[4,7,7,7,7,7,7,7,]),'TIMES':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,14,-10,-7,14,-8,14,14,14,-6,-5,]),'$end':([2,4,5,6,7,8,15,16,17,18,19,20,],[-9,-10,0,-2,-10,-7,-8,-1,-3,-4,-6,-5,]),'RPAREN':([2,7,8,9,15,17,18,19,20,],[-9,-10,-7,15,-8,-3,-4,-6,-5,]),'DIVIDE':([2,4,6,7,8,9,15,16,17,18,19,20,],[-9,-10,13,-10,-7,13,-8,13,13,13,-6,-5,]),}
+
+_lr_action = {}
+for _k, _v in _lr_action_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+      if not _x in _lr_action:  _lr_action[_x] = {}
+      _lr_action[_x][_k] = _y
+del _lr_action_items
+
+_lr_goto_items = {'statement':([0,],[5,]),'expression':([0,1,3,10,11,12,13,14,],[6,8,9,16,17,18,19,20,]),}
+
+_lr_goto = {}
+for _k, _v in _lr_goto_items.items():
+   for _x, _y in zip(_v[0], _v[1]):
+       if not _x in _lr_goto: _lr_goto[_x] = {}
+       _lr_goto[_x][_k] = _y
+del _lr_goto_items
+_lr_productions = [
+  ("S' -> statement","S'",1,None,None,None),
+  ('statement -> NAME EQUALS expression','statement',3,'p_statement_assign','calcparse.py',21),
+  ('statement -> expression','statement',1,'p_statement_expr','calcparse.py',25),
+  ('expression -> expression PLUS expression','expression',3,'p_expression_binop','calcparse.py',29),
+  ('expression -> expression MINUS expression','expression',3,'p_expression_binop','calcparse.py',30),
+  ('expression -> expression TIMES expression','expression',3,'p_expression_binop','calcparse.py',31),
+  ('expression -> expression DIVIDE expression','expression',3,'p_expression_binop','calcparse.py',32),
+  ('expression -> MINUS expression','expression',2,'p_expression_uminus','calcparse.py',39),
+  ('expression -> LPAREN expression RPAREN','expression',3,'p_expression_group','calcparse.py',43),
+  ('expression -> NUMBER','expression',1,'p_expression_number','calcparse.py',47),
+  ('expression -> NAME','expression',1,'p_expression_name','calcparse.py',51),
+]
diff --git a/ply/test/pkg_test6/parsing/lextab.py b/ply/test/pkg_test6/parsing/lextab.py
new file mode 100644
index 0000000..8cab298
--- /dev/null
+++ b/ply/test/pkg_test6/parsing/lextab.py
@@ -0,0 +1,10 @@
+# lextab.py. This file automatically created by PLY (version 3.11). Don't edit!
+_tabversion   = '3.10'
+_lextokens    = set(('DIVIDE', 'EQUALS', 'LPAREN', 'MINUS', 'NAME', 'NUMBER', 'PLUS', 'RPAREN', 'TIMES'))
+_lexreflags   = 64
+_lexliterals  = ''
+_lexstateinfo = {'INITIAL': 'inclusive'}
+_lexstatere   = {'INITIAL': [('(?P<t_NUMBER>\\d+)|(?P<t_newline>\\n+)|(?P<t_NAME>[a-zA-Z_][a-zA-Z0-9_]*)|(?P<t_LPAREN>\\()|(?P<t_PLUS>\\+)|(?P<t_TIMES>\\*)|(?P<t_RPAREN>\\))|(?P<t_EQUALS>=)|(?P<t_DIVIDE>/)|(?P<t_MINUS>-)', [None, ('t_NUMBER', 'NUMBER'), ('t_newline', 'newline'), (None, 'NAME'), (None, 'LPAREN'), (None, 'PLUS'), (None, 'TIMES'), (None, 'RPAREN'), (None, 'EQUALS'), (None, 'DIVIDE'), (None, 'MINUS')])]}
+_lexstateignore = {'INITIAL': ' \t'}
+_lexstateerrorf = {'INITIAL': 't_error'}
+_lexstateeoff = {}
diff --git a/ply/test/pkg_test6/parsing/parsetab.py b/ply/test/pkg_test6/parsing/parsetab.py
new file mode 100644
index 0000000..0b312e1
--- /dev/null
+++ b/ply/test/pkg_test6/parsing/parsetab.py
@@ -0,0 +1,40 @@
+
+# parsetab.py
+# This file is automatically generated. Do not edit.
+# pylint: disable=W,C,R
+_tabversion = '3.10'
+
+_lr_method = 'LALR'
+
+_lr_signature = 'leftPLUSMINUSleftTIMESDIVIDErightUMINUSDIVIDE EQUALS LPAREN MINUS NAME NUMBER PLUS RPAREN TIMESexpression : expression PLUS expression\n                  | expression MINUS expression\n                  | expression TIMES expression\n                  | expression DIVIDE expressionstatement : NAME EQUALS expressionstatement : expressionexpression : MINUS expression %prec UMINUSexpression : LPAREN expression RPARENexpression : NUMBERexpression : NAME'
+    
+_lr_action_items = {'PLUS':([3,4,5,6,7,12,13,14,15,16,],[-10,8,-9,-7,8,-8,-1,-2,-4,-3,]),'MINUS':([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,],[1,1,1,-10,9,-9,-7,9,1,1,1,1,-8,-1,-2,-4,-3,]),'NUMBER':([0,1,2,8,9,10,11,],[5,5,5,5,5,5,5,]),'LPAREN':([0,1,2,8,9,10,11,],[2,2,2,2,2,2,2,]),'NAME':([0,1,2,8,9,10,11,],[3,3,3,3,3,3,3,]),'TIMES':([3,4,5,6,7,12,13,14,15,16,],[-10,11,-9,-7,11,-8,11,11,-4,-3,]),'$end':([3,4,5,6,12,13,14,15,16,],[-10,0,-9,-7,-8,-1,-2,-4,-3,]),'RPAREN':([3,5,6,7,12,13,14,15,16,],[-10,-9,-7,12,-8,-1,-2,-4,-3,]),'DIVIDE':([3,4,5,6,7,12,13,14,15,16,],[-10,10,-9,-7,10,-8,10,10,-4,-3,]),}
+
+_lr_action = {}
+for _k, _v in _lr_action_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+      if not _x in _lr_action:  _lr_action[_x] = {}
+      _lr_action[_x][_k] = _y
+del _lr_action_items
+
+_lr_goto_items = {'expression':([0,1,2,8,9,10,11,],[4,6,7,13,14,15,16,]),}
+
+_lr_goto = {}
+for _k, _v in _lr_goto_items.items():
+   for _x, _y in zip(_v[0], _v[1]):
+       if not _x in _lr_goto: _lr_goto[_x] = {}
+       _lr_goto[_x][_k] = _y
+del _lr_goto_items
+_lr_productions = [
+  ("S' -> expression","S'",1,None,None,None),
+  ('expression -> expression PLUS expression','expression',3,'p_expression_binop','expression.py',4),
+  ('expression -> expression MINUS expression','expression',3,'p_expression_binop','expression.py',5),
+  ('expression -> expression TIMES expression','expression',3,'p_expression_binop','expression.py',6),
+  ('expression -> expression DIVIDE expression','expression',3,'p_expression_binop','expression.py',7),
+  ('statement -> NAME EQUALS expression','statement',3,'p_statement_assign','statement.py',4),
+  ('statement -> expression','statement',1,'p_statement_expr','statement.py',8),
+  ('expression -> MINUS expression','expression',2,'p_expression_uminus','expression.py',14),
+  ('expression -> LPAREN expression RPAREN','expression',3,'p_expression_group','expression.py',18),
+  ('expression -> NUMBER','expression',1,'p_expression_number','expression.py',22),
+  ('expression -> NAME','expression',1,'p_expression_name','expression.py',26),
+]
diff --git a/ply/test/testcpp.py b/ply/test/testcpp.py
new file mode 100644
index 0000000..2e98edd
--- /dev/null
+++ b/ply/test/testcpp.py
@@ -0,0 +1,101 @@
+from unittest import TestCase, main
+
+from multiprocessing import Process, Queue
+from six.moves.queue import Empty
+
+import sys
+
+if ".." not in sys.path:
+    sys.path.insert(0, "..")
+
+from ply.lex import lex
+from ply.cpp import *
+
+
+def preprocessing(in_, out_queue):
+    out = None
+
+    try:
+        p = Preprocessor(lex())
+        p.parse(in_)
+        tokens = [t.value for t in p.parser]
+        out = "".join(tokens)
+    finally:
+        out_queue.put(out)
+
+class CPPTests(TestCase):
+    "Tests related to ANSI-C style lexical preprocessor."
+
+    def __test_preprocessing(self, in_, expected, time_limit = 1.0):
+        out_queue = Queue()
+
+        preprocessor = Process(
+            name = "PLY's C preprocessor",
+            target = preprocessing,
+            args = (in_, out_queue)
+        )
+
+        preprocessor.start()
+
+        try:
+            out = out_queue.get(timeout = time_limit)
+        except Empty:
+            preprocessor.terminate()
+            raise RuntimeError("Time limit exceeded!")
+        else:
+            self.assertMultiLineEqual(out, expected)
+
+    def test_concatenation(self):
+        self.__test_preprocessing("""\
+#define a(x) x##_
+#define b(x) _##x
+#define c(x) _##x##_
+#define d(x,y) _##x##y##_
+
+a(i)
+b(j)
+c(k)
+d(q,s)"""
+            , """\
+
+
+
+
+
+i_
+_j
+_k_
+_qs_"""
+        )
+
+    def test_deadloop_macro(self):
+        # If a word matches the name of a parameterized macro but is not
+        # followed by an argument list, trying to expand that word as a
+        # macro used to send the parser into an infinite loop.
+
+        self.__test_preprocessing("""\
+#define a(x) x
+
+a;"""
+            , """\
+
+
+a;"""
+        )
+
+    def test_index_error(self):
+        # If there are no tokens at all after a word ("a") that matches the
+        # name of a parameterized macro, trying to expand that word used to
+        # raise an IndexError.
+
+        self.__test_preprocessing("""\
+#define a(x) x
+
+a"""
+            , """\
+
+
+a"""
+        )
+
+main()
diff --git a/ply/test/testlex.py b/ply/test/testlex.py
index 3880f6f..83070a7 100755
--- a/ply/test/testlex.py
+++ b/ply/test/testlex.py
@@ -514,6 +514,26 @@
         except OSError:
             pass
 
+    def test_lex_optimize4(self):
+
+        # Regression test to make sure that reflags works correctly
+        # on Python 3.
+
+        for extension in ['py', 'pyc']:
+            try:
+                os.remove("opt4tab.{0}".format(extension))
+            except OSError:
+                pass
+
+        run_import("lex_optimize4")
+        run_import("lex_optimize4")
+
+        for extension in ['py', 'pyc']:
+            try:
+                os.remove("opt4tab.{0}".format(extension))
+            except OSError:
+                pass
+
     def test_lex_opt_alias(self):
         try:
             os.remove("aliastab.py")