Snap for 6001391 from a244eb82e97ba734c2f06a28395a68133b850d78 to qt-aml-tzdata-release

Change-Id: I06f1bdfd674ed8c4105ea37f31301b057a949884
diff --git a/BUILD.bazel b/BUILD.bazel
index 6ee69f2..d97a019 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -8,6 +8,8 @@
     visibility = [":__subpackages__"],
 )
 
+load("@rules_cc//cc:defs.bzl", "cc_library")
+
 cc_library(
     name = "benchmark",
     srcs = glob(
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9db1361..8cfe125 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -33,6 +33,7 @@
 # in cases where it is not possible to build or find a valid version of gtest.
 option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)
 
+set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
 set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
 function(should_enable_assembly_tests)
   if(CMAKE_BUILD_TYPE)
diff --git a/METADATA b/METADATA
index 9d04a9a..fb2362a 100644
--- a/METADATA
+++ b/METADATA
@@ -9,10 +9,10 @@
     type: GIT
     value: "https://github.com/google/benchmark.git"
   }
-  version: "f4f5dba46bdbde0e95d736cca124025745bcd7b6"
+  version: "c50ac68c50ff8da3827cd6720792117910d85666"
   last_upgrade_date {
     year: 2019
-    month: 10
-    day: 7
+    month: 11
+    day: 6
   }
 }
diff --git a/WORKSPACE b/WORKSPACE
index 9a75f96..8df248a 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -3,7 +3,13 @@
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 
 http_archive(
-     name = "com_google_googletest",
-     urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
-     strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
+    name = "rules_cc",
+    strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912",
+    urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"],
+)
+
+http_archive(
+    name = "com_google_googletest",
+    strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
+    urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
 )
diff --git a/appveyor.yml b/appveyor.yml
index cf24019..81da955 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -41,7 +41,7 @@
   - cmake --build . --config %configuration%
 
 test_script:
-  - ctest -c %configuration% --timeout 300 --output-on-failure
+  - ctest --build-config %configuration% --timeout 300 --output-on-failure
 
 artifacts:
   - path: '_build/CMakeFiles/*.log'
diff --git a/src/benchmark.cc b/src/benchmark.cc
index 9af0701..07942eb 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -73,14 +73,14 @@
 // Report the result of each benchmark repetition. When 'true' is specified,
 // only the mean, standard deviation, and other statistics are reported for
 // repeated benchmarks. Affects all reporters.
-DEFINE_bool( benchmark_report_aggregates_only, false);
+DEFINE_bool(benchmark_report_aggregates_only, false);
 
 // Display the result of each benchmark repetition. When 'true' is specified,
 // only the mean, standard deviation, and other statistics are displayed for
 // repeated benchmarks. Unlike benchmark_report_aggregates_only, this only
 // affects the display reporter, but *NOT* the file reporter, which will
 // still contain all the output.
-DEFINE_bool( benchmark_display_aggregates_only, false);
+DEFINE_bool(benchmark_display_aggregates_only, false);
 
 // The format to use for console output.
 // Valid values are 'console', 'json', or 'csv'.
@@ -142,7 +142,7 @@
   // which must be suppressed.
 #if defined(__INTEL_COMPILER)
 #pragma warning push
-#pragma warning(disable:1875)
+#pragma warning(disable : 1875)
 #elif defined(__GNUC__)
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Winvalid-offsetof"
@@ -309,7 +309,6 @@
 #pragma GCC diagnostic pop
 #endif
 
-
 }  // end namespace
 
 bool IsZero(double n) {
@@ -318,7 +317,7 @@
 
 ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
   int output_opts = ConsoleReporter::OO_Defaults;
-  auto is_benchmark_color = [force_no_color] () -> bool {
+  auto is_benchmark_color = [force_no_color]() -> bool {
     if (force_no_color) {
       return false;
     }
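
For context, the pragma change above is pure clang-format tidying of the
usual push/disable/pop guard that benchmark wraps around an offsetof use
some compilers warn about. A freestanding sketch of that guard pattern (the
struct here is a trivial stand-in, so neither warning would actually fire
for it):

    #include <cstddef>

    struct S {
      int a;
      int b;
    };

    // Each compiler gets its own save/disable/restore pair around the
    // offsetof use; 1875 is ICC's "offsetof applied to non-POD" warning,
    // and -Winvalid-offsetof is the GCC/Clang equivalent.
    #if defined(__INTEL_COMPILER)
    #pragma warning push
    #pragma warning(disable : 1875)
    #elif defined(__GNUC__)
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Winvalid-offsetof"
    #endif
    static const size_t kOffsetOfB = offsetof(S, b);
    #if defined(__INTEL_COMPILER)
    #pragma warning pop
    #elif defined(__GNUC__)
    #pragma GCC diagnostic pop
    #endif

    int main() { return kOffsetOfB == sizeof(int) ? 0 : 1; }
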
diff --git a/src/commandlineflags.cc b/src/commandlineflags.cc
index 6bd65c5..4e60f0b 100644
--- a/src/commandlineflags.cc
+++ b/src/commandlineflags.cc
@@ -14,6 +14,7 @@
 
 #include "commandlineflags.h"
 
+#include <algorithm>
 #include <cctype>
 #include <cstdlib>
 #include <cstring>
@@ -92,44 +93,40 @@
 
 }  // namespace
 
-// Reads and returns the Boolean environment variable corresponding to
-// the given flag; if it's not set, returns default_value.
-//
-// The value is considered true iff it's not "0".
-bool BoolFromEnv(const char* flag, bool default_value) {
+bool BoolFromEnv(const char* flag, bool default_val) {
   const std::string env_var = FlagToEnvVar(flag);
-  const char* const string_value = getenv(env_var.c_str());
-  return string_value == nullptr ? default_value
-                                 : strcmp(string_value, "0") != 0;
+  const char* const value_str = getenv(env_var.c_str());
+  return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
 }
 
-// Reads and returns a 32-bit integer stored in the environment
-// variable corresponding to the given flag; if it isn't set or
-// doesn't represent a valid 32-bit integer, returns default_value.
-int32_t Int32FromEnv(const char* flag, int32_t default_value) {
+int32_t Int32FromEnv(const char* flag, int32_t default_val) {
   const std::string env_var = FlagToEnvVar(flag);
-  const char* const string_value = getenv(env_var.c_str());
-  if (string_value == nullptr) {
-    // The environment variable is not set.
-    return default_value;
+  const char* const value_str = getenv(env_var.c_str());
+  int32_t value = default_val;
+  if (value_str == nullptr ||
+      !ParseInt32(std::string("Environment variable ") + env_var, value_str,
+                  &value)) {
+    return default_val;
   }
-
-  int32_t result = default_value;
-  if (!ParseInt32(std::string("Environment variable ") + env_var, string_value,
-                  &result)) {
-    std::cout << "The default value " << default_value << " is used.\n";
-    return default_value;
-  }
-
-  return result;
+  return value;
 }
 
-// Reads and returns the string environment variable corresponding to
-// the given flag; if it's not set, returns default_value.
-const char* StringFromEnv(const char* flag, const char* default_value) {
+double DoubleFromEnv(const char* flag, double default_val) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const value_str = getenv(env_var.c_str());
+  double value = default_val;
+  if (value_str == nullptr ||
+      !ParseDouble(std::string("Environment variable ") + env_var, value_str,
+                   &value)) {
+    return default_val;
+  }
+  return value;
+}
+
+const char* StringFromEnv(const char* flag, const char* default_val) {
   const std::string env_var = FlagToEnvVar(flag);
   const char* const value = getenv(env_var.c_str());
-  return value == nullptr ? default_value : value;
+  return value == nullptr ? default_val : value;
 }
 
 // Parses a string as a command line flag.  The string should have
@@ -214,9 +211,18 @@
 }
 
 bool IsTruthyFlagValue(const std::string& value) {
-  if (value.empty()) return true;
-  char ch = value[0];
-  return isalnum(ch) &&
-         !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N');
+  if (value.size() == 1) {
+    char v = value[0];
+    return isalnum(v) &&
+           !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
+  } else if (!value.empty()) {
+    std::string value_lower(value);
+    std::transform(value_lower.begin(), value_lower.end(),
+                   value_lower.begin(), ::tolower);
+    return !(value_lower == "false" || value_lower == "no" ||
+             value_lower == "off");
+  } else
+    return true;
 }
+
 }  // end namespace benchmark
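
The rewritten IsTruthyFlagValue above gives flag parsing three-way
semantics: single characters keep the old starts-with rule, longer values
are matched as whole words, and an empty value counts as true. A minimal
standalone sketch of the resulting rules (truthy() is a hypothetical
stand-in for the library function):

    #include <algorithm>
    #include <cassert>
    #include <cctype>
    #include <string>

    static bool truthy(const std::string& value) {
      if (value.size() == 1) {
        // One character: alphanumeric and not an "obviously false" letter.
        char v = value[0];
        return std::isalnum(static_cast<unsigned char>(v)) &&
               !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
      }
      if (value.empty()) return true;  // set-but-empty counts as true
      // Longer values: false only for the words "false", "no", "off".
      std::string lower(value);
      std::transform(lower.begin(), lower.end(), lower.begin(),
                     [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
      return !(lower == "false" || lower == "no" || lower == "off");
    }

    int main() {
      assert(truthy("1") && truthy("on") && truthy("Yes") && truthy(""));
      assert(!truthy("0") && !truthy("No") && !truthy("OFF"));
    }
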
diff --git a/src/commandlineflags.h b/src/commandlineflags.h
index afe5238..3a1f6a8 100644
--- a/src/commandlineflags.h
+++ b/src/commandlineflags.h
@@ -10,22 +10,51 @@
 // Macros for declaring flags.
 #define DECLARE_bool(name) extern bool FLAG(name)
 #define DECLARE_int32(name) extern int32_t FLAG(name)
-#define DECLARE_int64(name) extern int64_t FLAG(name)
 #define DECLARE_double(name) extern double FLAG(name)
 #define DECLARE_string(name) extern std::string FLAG(name)
 
 // Macros for defining flags.
-#define DEFINE_bool(name, default_val) bool FLAG(name) = (default_val)
-#define DEFINE_int32(name, default_val) int32_t FLAG(name) = (default_val)
-#define DEFINE_int64(name, default_val) int64_t FLAG(name) = (default_val)
-#define DEFINE_double(name, default_val) double FLAG(name) = (default_val)
-#define DEFINE_string(name, default_val) std::string FLAG(name) = (default_val)
+#define DEFINE_bool(name, default_val)            \
+  bool FLAG(name) =                               \
+    benchmark::BoolFromEnv(#name, default_val)
+#define DEFINE_int32(name, default_val)           \
+  int32_t FLAG(name) =                            \
+    benchmark::Int32FromEnv(#name, default_val)
+#define DEFINE_double(name, default_val)          \
+  double FLAG(name) =                             \
+    benchmark::DoubleFromEnv(#name, default_val)
+#define DEFINE_string(name, default_val)          \
+  std::string FLAG(name) =                        \
+    benchmark::StringFromEnv(#name, default_val)
 
 namespace benchmark {
-// Parses a bool/Int32/string from the environment variable
-// corresponding to the given Google Test flag.
+
+// Parses a bool from the environment variable
+// corresponding to the given flag.
+//
+// If the variable exists, returns the IsTruthyFlagValue() value; if not,
+// returns the given default value.
 bool BoolFromEnv(const char* flag, bool default_val);
+
+// Parses an Int32 from the environment variable
+// corresponding to the given flag.
+//
+// If the variable exists, returns the ParseInt32() value; if not, returns
+// the given default value.
 int32_t Int32FromEnv(const char* flag, int32_t default_val);
+
+// Parses a Double from the environment variable
+// corresponding to the given flag.
+//
+// If the variable exists, returns the ParseDouble() value; if not, returns
+// the given default value.
+double DoubleFromEnv(const char* flag, double default_val);
+
+// Parses a string from the environment variable
+// corresponding to the given flag.
+//
+// If the variable exists, returns its value; if not, returns
+// the given default value.
 const char* StringFromEnv(const char* flag, const char* default_val);
 
 // Parses a string for a bool flag, in the form of either
@@ -64,9 +93,11 @@
 bool IsFlag(const char* str, const char* flag);
 
 // Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
-// some non-alphanumeric character. As a special case, also returns true if
-// value is the empty string.
+// some non-alphanumeric character (this applies to single-character values).
+// Also returns false if the value case-insensitively matches 'no', 'false'
+// or 'off'. As a special case, also returns true if value is the empty string.
 bool IsTruthyFlagValue(const std::string& value);
+
 }  // end namespace benchmark
 
 #endif  // BENCHMARK_COMMANDLINEFLAGS_H_
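
With the new DEFINE_* macros, a flag's literal default becomes a fallback:
each flag variable is initialized from its environment variable at
static-initialization time, before any command-line parsing runs. A
simplified, self-contained sketch of what a definition now amounts to (the
*Sketch name is hypothetical; the real helpers, including proper
validation, live in commandlineflags.cc):

    #include <cctype>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Mimics Int32FromEnv plus the env-var naming the tests below exercise:
    // flag "in_env" is looked up as "BENCHMARK_IN_ENV".
    static int32_t Int32FromEnvSketch(const char* flag, int32_t default_val) {
      std::string env_var = "BENCHMARK_";
      for (const char* p = flag; *p != '\0'; ++p)
        env_var += static_cast<char>(std::toupper(static_cast<unsigned char>(*p)));
      const char* v = std::getenv(env_var.c_str());
      return v != nullptr ? static_cast<int32_t>(std::strtol(v, nullptr, 10))
                          : default_val;
    }

    // Roughly what DEFINE_int32(in_env, 42) expands to after this change.
    static int32_t FLAG_in_env = Int32FromEnvSketch("in_env", 42);

    int main() { std::printf("in_env = %d\n", FLAG_in_env); }
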
diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index 11db2b9..fe7b1be 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -135,7 +135,7 @@
     out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
         << ",\n";
     out << cache_indent
-        << FormatKV("size", static_cast<int64_t>(CI.size) * 1000u) << ",\n";
+        << FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
     out << cache_indent
         << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
         << "\n";
diff --git a/test/BUILD b/test/BUILD
index 3f174c4..9bb8cb0 100644
--- a/test/BUILD
+++ b/test/BUILD
@@ -5,7 +5,7 @@
     "-Wall",
     "-Wextra",
     "-Wshadow",
-#    "-Wshorten-64-to-32",
+    #    "-Wshorten-64-to-32",
     "-Wfloat-equal",
     "-fstrict-aliasing",
 ]
@@ -16,13 +16,14 @@
     "donotoptimize_test.cc": ["-O3"],
 })
 
-
 TEST_ARGS = ["--benchmark_min_time=0.01"]
 
 PER_SRC_TEST_ARGS = ({
     "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"],
 })
 
+load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
+
 cc_library(
     name = "output_test_helper",
     testonly = 1,
@@ -36,24 +37,31 @@
 )
 
 [
-  cc_test(
-    name = test_src[:-len(".cc")],
-    size = "small",
-    srcs = [test_src],
-    args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []),
-    copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []),
-    deps = [
-        ":output_test_helper",
-        "//:benchmark",
-        "//:benchmark_internal_headers",
-        "@com_google_googletest//:gtest",
-    ] + (
-        ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else []
-    ),
-  # FIXME: Add support for assembly tests to bazel.
-  # See Issue #556
-  # https://github.com/google/benchmark/issues/556
-  ) for test_src in glob(["*test.cc"], exclude = ["*_assembly_test.cc", "link_main_test.cc"])
+    cc_test(
+        name = test_src[:-len(".cc")],
+        size = "small",
+        srcs = [test_src],
+        args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []),
+        copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []),
+        deps = [
+            ":output_test_helper",
+            "//:benchmark",
+            "//:benchmark_internal_headers",
+            "@com_google_googletest//:gtest",
+        ] + (
+            ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else []
+        ),
+        # FIXME: Add support for assembly tests to bazel.
+        # See Issue #556
+        # https://github.com/google/benchmark/issues/556
+    )
+    for test_src in glob(
+        ["*test.cc"],
+        exclude = [
+            "*_assembly_test.cc",
+            "link_main_test.cc",
+        ],
+    )
 ]
 
 cc_test(
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 030f35a..ddcb1a1 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -54,12 +54,12 @@
 
 # Demonstration executable
 compile_benchmark_test(benchmark_test)
-add_test(benchmark benchmark_test --benchmark_min_time=0.01)
+add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(filter_test)
 macro(add_filter_test name filter expect)
-  add_test(${name} filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
-  add_test(${name}_list_only filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
+  add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
+  add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
 endmacro(add_filter_test)
 
 add_filter_test(filter_simple "Foo" 3)
@@ -82,16 +82,16 @@
 add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
 
 compile_benchmark_test(options_test)
-add_test(options_benchmarks options_test --benchmark_min_time=0.01)
+add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(basic_test)
-add_test(basic_benchmark basic_test --benchmark_min_time=0.01)
+add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(diagnostics_test)
-add_test(diagnostics_test diagnostics_test --benchmark_min_time=0.01)
+add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(skip_with_error_test)
-add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01)
+add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(donotoptimize_test)
 # Some of the issues with DoNotOptimize only occur when optimization is enabled
@@ -99,49 +99,49 @@
 if (BENCHMARK_HAS_O3_FLAG)
   set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
 endif()
-add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01)
+add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(fixture_test)
-add_test(fixture_test fixture_test --benchmark_min_time=0.01)
+add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(register_benchmark_test)
-add_test(register_benchmark_test register_benchmark_test --benchmark_min_time=0.01)
+add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(map_test)
-add_test(map_test map_test --benchmark_min_time=0.01)
+add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(multiple_ranges_test)
-add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)
+add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01)
 
 compile_benchmark_test_with_main(link_main_test)
-add_test(link_main_test link_main_test --benchmark_min_time=0.01)
+add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01)
 
 compile_output_test(reporter_output_test)
-add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)
+add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01)
 
 compile_output_test(templated_fixture_test)
-add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01)
+add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01)
 
 compile_output_test(user_counters_test)
-add_test(user_counters_test user_counters_test --benchmark_min_time=0.01)
+add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01)
 
 compile_output_test(internal_threading_test)
-add_test(internal_threading_test internal_threading_test --benchmark_min_time=0.01)
+add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01)
 
 compile_output_test(report_aggregates_only_test)
-add_test(report_aggregates_only_test report_aggregates_only_test --benchmark_min_time=0.01)
+add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01)
 
 compile_output_test(display_aggregates_only_test)
-add_test(display_aggregates_only_test display_aggregates_only_test --benchmark_min_time=0.01)
+add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01)
 
 compile_output_test(user_counters_tabular_test)
-add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
+add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
 
 compile_output_test(user_counters_thousands_test)
-add_test(user_counters_thousands_test user_counters_thousands_test --benchmark_min_time=0.01)
+add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01)
 
 compile_output_test(memory_manager_test)
-add_test(memory_manager_test memory_manager_test --benchmark_min_time=0.01)
+add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01)
 
 check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
 if (BENCHMARK_HAS_CXX03_FLAG)
@@ -159,7 +159,7 @@
         PROPERTIES
         LINK_FLAGS "-Wno-odr")
   endif()
-  add_test(cxx03 cxx03_test --benchmark_min_time=0.01)
+  add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01)
 endif()
 
 # Attempt to work around flaky test failures when running on Appveyor servers.
@@ -169,7 +169,7 @@
   set(COMPLEXITY_MIN_TIME "0.01")
 endif()
 compile_output_test(complexity_test)
-add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
+add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
 
 ###############################################################################
 # GoogleTest Unit Tests
@@ -184,7 +184,7 @@
 
   macro(add_gtest name)
     compile_gtest(${name})
-    add_test(${name} ${name})
+    add_test(NAME ${name} COMMAND ${name})
   endmacro()
 
   add_gtest(benchmark_gtest)
diff --git a/test/commandlineflags_gtest.cc b/test/commandlineflags_gtest.cc
index 5460778..36bdb44 100644
--- a/test/commandlineflags_gtest.cc
+++ b/test/commandlineflags_gtest.cc
@@ -34,6 +34,58 @@
   ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0);
   EXPECT_EQ(BoolFromEnv("in_env", true), false);
   unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "N", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "n", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "NO", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "No", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "no", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "F", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "f", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "FALSE", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "False", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "false", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "OFF", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Off", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "off", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", true), false);
+  unsetenv("BENCHMARK_IN_ENV");
 }
 
 TEST(BoolFromEnv, True) {
@@ -41,9 +93,63 @@
   EXPECT_EQ(BoolFromEnv("in_env", false), true);
   unsetenv("BENCHMARK_IN_ENV");
 
-  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Y", 1), 0);
   EXPECT_EQ(BoolFromEnv("in_env", false), true);
   unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "y", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "YES", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Yes", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "yes", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "T", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "t", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "TRUE", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "True", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "true", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "ON", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "On", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "on", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+
+#ifndef BENCHMARK_OS_WINDOWS
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "", 1), 0);
+  EXPECT_EQ(BoolFromEnv("in_env", false), true);
+  unsetenv("BENCHMARK_IN_ENV");
+#endif
 }
 
 TEST(Int32FromEnv, NotInEnv) {
@@ -54,7 +160,7 @@
 TEST(Int32FromEnv, InvalidInteger) {
   ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
   EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
-  ASSERT_EQ(unsetenv("BENCHMARK_IN_ENV"), 0);
+  unsetenv("BENCHMARK_IN_ENV");
 }
 
 TEST(Int32FromEnv, ValidInteger) {
@@ -63,6 +169,23 @@
   unsetenv("BENCHMARK_IN_ENV");
 }
 
+TEST(DoubleFromEnv, NotInEnv) {
+  ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
+  EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
+}
+
+TEST(DoubleFromEnv, InvalidReal) {
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
+  EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
+  unsetenv("BENCHMARK_IN_ENV");
+}
+
+TEST(DoubleFromEnv, ValidReal) {
+  ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0);
+  EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
+  unsetenv("BENCHMARK_IN_ENV");
+}
+
 TEST(StringFromEnv, Default) {
   ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
   EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
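
A closing note on the #ifndef BENCHMARK_OS_WINDOWS guard above: POSIX can
represent a variable that is set but empty (which BoolFromEnv now treats as
true via the empty-string special case), whereas the Windows CRT treats
assigning an empty value as deleting the variable, so that state cannot be
constructed there. A POSIX-only sketch of the distinction:

    #include <cassert>
    #include <cstdlib>

    int main() {
      // Set-but-empty: getenv returns "" rather than nullptr, so the flag
      // machinery sees the variable as present (and truthy).
      setenv("BENCHMARK_IN_ENV", "", 1);
      const char* v = std::getenv("BENCHMARK_IN_ENV");
      assert(v != nullptr && v[0] == '\0');
      // After unsetenv the variable is genuinely absent.
      unsetenv("BENCHMARK_IN_ENV");
      assert(std::getenv("BENCHMARK_IN_ENV") == nullptr);
    }
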