Use Env Variables for ACTS keys

- Add ACTS_LOGPATH env key support to specify the
  root log path for ACTS output.
- Add the ACTS_TESTPATHS env key to specify the test
  paths for ACTS to search for test cases.

Bug: 31122931 
Change-Id: Ic9e267de2787f124f656816443c2a45b5d578697
(cherry picked from commit d3fc746121886d173973d4e98e6adfc5129def4c)
diff --git a/acts/README.md b/acts/README.md
index 4ff768b..dca9f6d 100644
--- a/acts/README.md
+++ b/acts/README.md
@@ -86,7 +86,7 @@
 
 ### Configuration Files
 To run tests, required information must be provided via a json-formatted
-text file. The required information includes a list of “testbed” configs.
+text file. The required information includes a list of ***testbed*** configs.
 Each specifies the hardware, services, the path to the logs directory, and
 a list of paths where the python test case files are located. Below are the
 contents of a sample configuration file:
@@ -104,6 +104,11 @@
     "custom_param1": {"favorite_food": "Icecream!"}
 }
 ```
+The ***testpaths*** and ***logpath*** keys may alternately be supplied via the
+execution environment through the ACTS_TESTPATHS and ACTS_LOGPATH keys,
+respectively. To specify multiple test paths, the value should follow the
+standard ':'-delimited format. Explicit keys in a configuration file will
+override any defaults provided by the environment.
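+For example, one might export the variables before invoking act.py (the
+paths below are illustrative):
+```
+export ACTS_LOGPATH=/tmp/acts/logs
+export ACTS_TESTPATHS=/acts/tests/suite_a:/acts/tests/suite_b
+```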
 
 ### Test Class
 Test classes are instantiated with a dictionary of “controllers”. The
diff --git a/acts/framework/acts/bin/act.py b/acts/framework/acts/bin/act.py
index 8fa87d2..879e7a3 100755
--- a/acts/framework/acts/bin/act.py
+++ b/acts/framework/acts/bin/act.py
@@ -31,6 +31,13 @@
 from acts.utils import load_config
 from acts.utils import valid_filename_chars
 
+# An environment variable defining the base location for ACTS logs.
+_ENV_ACTS_LOGPATH = 'ACTS_LOGPATH'
+
+# An environment variable defining the test search paths for ACTS.
+_ENV_ACTS_TESTPATHS = 'ACTS_TESTPATHS'
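+
+# The separator between multiple paths in ACTS_TESTPATHS, e.g.
+# "/acts/tests/a:/acts/tests/b" (paths illustrative).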
+_PATH_SEPARATOR = ':'
+
 
 def _validate_test_config(test_config):
     """Validates the raw configuration loaded from the config file.
@@ -40,7 +47,8 @@
     for k in Config.reserved_keys.value:
         if k not in test_config:
             raise USERError(("Required key {} missing in test "
-            "config.").format(k))
+                             "config.").format(k))
+
 
 def _validate_testbed_name(name):
     """Validates the name of a test bed.
@@ -62,6 +70,7 @@
         if l not in valid_filename_chars:
             raise USERError("Char '%s' is not allowed in test bed names." % l)
 
+
 def _validate_testbed_configs(testbed_configs):
     """Validates the testbed configurations.
 
@@ -83,11 +92,13 @@
             raise USERError("Duplicate testbed name {} found.".format(name))
         seen_names.add(name)
 
+
 def _verify_test_class_name(test_cls_name):
     if not test_cls_name.endswith("Test"):
         raise USERError(("Requested test class '%s' does not follow the test "
                          "class naming convention *Test.") % test_cls_name)
 
+
 def _parse_one_test_specifier(item):
     """Parse one test specifier from command line input.
 
@@ -121,13 +132,14 @@
         for elem in test_case_names.split(','):
             test_case_name = elem.strip()
             if not test_case_name.startswith("test_"):
-                    raise USERError(("Requested test case '%s' in test class "
-                                    "'%s' does not follow the test case "
-                                    "naming convention test_*.") % (
-                                    test_case_name, test_cls_name))
+                raise USERError(("Requested test case '%s' in test class "
+                                 "'%s' does not follow the test case "
+                                 "naming convention test_*.") %
+                                (test_case_name, test_cls_name))
             clean_names.append(test_case_name)
         return (test_cls_name, clean_names)
 
+
 def parse_test_list(test_list):
     """Parse user provided test list into internal format for test_runner.
 
@@ -160,15 +172,30 @@
                 if tb[Config.key_testbed_name.value] in tb_filters:
                     tbs.append(tb)
             if len(tbs) != len(tb_filters):
-                print("Expect to find %d test bed configs, found %d." % (
-                    len(tb_filters), len(tbs)))
+                print("Expect to find %d test bed configs, found %d." %
+                      (len(tb_filters), len(tbs)))
                 print("Check if you have the correct test bed names.")
                 return None
             configs[Config.key_testbed.value] = tbs
+
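+        # Prefer explicit values from the config file; consult the
+        # environment only for keys the config leaves unset.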
+        if (Config.key_log_path.value not in configs and
+                _ENV_ACTS_LOGPATH in os.environ):
+            print('Using environment log path: %s' %
+                  (os.environ[_ENV_ACTS_LOGPATH]))
+            configs[Config.key_log_path.value] = os.environ[_ENV_ACTS_LOGPATH]
+        if (Config.key_test_paths.value not in configs and
+                _ENV_ACTS_TESTPATHS in os.environ):
+            print('Using environment test paths: %s' %
+                  (os.environ[_ENV_ACTS_TESTPATHS]))
+            configs[Config.key_test_paths.value] = os.environ[
+                _ENV_ACTS_TESTPATHS].split(_PATH_SEPARATOR)
+
         _validate_test_config(configs)
         _validate_testbed_configs(configs[Config.key_testbed.value])
         k_log_path = Config.key_log_path.value
         configs[k_log_path] = abs_path(configs[k_log_path])
+        config_path, _ = os.path.split(abs_path(test_config_path))
+        configs[Config.key_config_path.value] = config_path
         tps = configs[Config.key_test_paths.value]
     except USERError as e:
         print("Something is wrong in the test configurations.")
@@ -192,6 +219,7 @@
         config_jsons.append(new_test_config)
     return config_jsons
 
+
 def _run_test(test_runner, repeat=1):
     """Instantiate and runs TestRunner.
 
@@ -214,13 +242,16 @@
     finally:
         test_runner.stop()
 
+
 def _gen_term_signal_handler(test_runners):
     def termination_sig_handler(signal_num, frame):
         for t in test_runners:
             t.stop()
         sys.exit(1)
+
     return termination_sig_handler
 
+
 def _run_tests_parallel(process_args):
     print("Executing {} concurrent test runs.".format(len(process_args)))
     results = concurrent_exec(_run_test, process_args)
@@ -228,6 +259,7 @@
         if r is False or isinstance(r, Exception):
             return False
 
+
 def _run_tests_sequential(process_args):
     ok = True
     for args in process_args:
@@ -235,6 +267,7 @@
             ok = False
     return ok
 
+
 def _parse_test_file(fpath):
     try:
         with open(fpath, 'r') as f:
@@ -252,31 +285,63 @@
         print("Error loading test file.")
         raise
 
+
 def main(argv):
-    parser = argparse.ArgumentParser(description=("Specify tests to run. If "
-                 "nothing specified, run all test cases found."))
-    parser.add_argument('-c', '--config', nargs=1, type=str, required=True,
-        metavar="<PATH>", help="Path to the test configuration file.")
-    parser.add_argument('--test_args', nargs='+', type=str,
+    parser = argparse.ArgumentParser(description=(
+        "Specify tests to run. If "
+        "nothing specified, run all test cases found."))
+    parser.add_argument(
+        '-c',
+        '--config',
+        nargs=1,
+        type=str,
+        required=True,
+        metavar="<PATH>",
+        help="Path to the test configuration file.")
+    parser.add_argument(
+        '--test_args',
+        nargs='+',
+        type=str,
         metavar="Arg1 Arg2 ...",
         help=("Command-line arguments to be passed to every test case in a "
               "test run. Use with caution."))
-    parser.add_argument('-d', '--debug', action="store_true",
+    parser.add_argument(
+        '-d',
+        '--debug',
+        action="store_true",
         help=("Set this flag if manual debugging is required."))
-    parser.add_argument('-p', '--parallel', action="store_true",
+    parser.add_argument(
+        '-p',
+        '--parallel',
+        action="store_true",
         help=("If set, tests will be executed on all testbeds in parallel. "
               "Otherwise, tests are executed iteratively testbed by testbed."))
-    parser.add_argument('-r', '--repeat', type=int,
+    parser.add_argument(
+        '-r',
+        '--repeat',
+        type=int,
         metavar="<NUMBER>",
         help="Number of times to run the specified test cases.")
-    parser.add_argument('-tb', '--testbed', nargs='+', type=str,
+    parser.add_argument(
+        '-tb',
+        '--testbed',
+        nargs='+',
+        type=str,
         metavar="[<TEST BED NAME1> <TEST BED NAME2> ...]",
         help="Specify which test beds to run tests on.")
     group = parser.add_mutually_exclusive_group(required=True)
-    group.add_argument('-tc', '--testclass', nargs='+', type=str,
+    group.add_argument(
+        '-tc',
+        '--testclass',
+        nargs='+',
+        type=str,
         metavar="[TestClass1 TestClass2:test_xxx ...]",
         help="A list of test classes/cases to run.")
-    group.add_argument('-tf', '--testfile', nargs=1, type=str,
+    group.add_argument(
+        '-tf',
+        '--testfile',
+        nargs=1,
+        type=str,
         metavar="<PATH>",
         help=("Path to a file containing a comma delimited list of test "
               "classes to run."))
@@ -322,6 +387,6 @@
         sys.exit(1)
     sys.exit(0)
 
+
 if __name__ == "__main__":
     main(sys.argv[1:])
-