[common_lib/test.py]: add append opt to output_perf_value

Add a replace_existing_values parameter to control whether a new
perf value should be appended to an existing key or replace it.
If the parameter is set to True, the new perf value overrides any
existing values; otherwise the new value is appended as before.
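
Illustrative usage (a sketch; assumes a test instance whose
resultsdir is already set, as in the unit tests below):

    # First report creates the "Test" entry.
    self.output_perf_value("Test", [1, 2, 3], units="ms",
                           higher_is_better=False)
    # The default behaviour appends: values become [1, 2, 3, 4, 5, 6].
    self.output_perf_value("Test", [4, 5, 6], units="ms",
                           higher_is_better=False)
    # With replace_existing_values=True the entry is overwritten,
    # leaving only [7, 8, 9] in results-chart.json.
    self.output_perf_value("Test", [7, 8, 9], units="ms",
                           higher_is_better=False,
                           replace_existing_values=True)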

BUG=chromium:717664
TEST=autotest_lib.client.common_lib.test_unittest

Change-Id: I54816624cea455408094b66e2d14a1f2783a8c5b
Reviewed-on: https://chromium-review.googlesource.com/511222
Commit-Ready: Po-Hsien Wang <pwang@chromium.org>
Tested-by: Po-Hsien Wang <pwang@chromium.org>
Reviewed-by: Ilja H. Friedel <ihf@chromium.org>
diff --git a/client/common_lib/test.py b/client/common_lib/test.py
index b7602a2..177e624 100644
--- a/client/common_lib/test.py
+++ b/client/common_lib/test.py
@@ -91,7 +91,8 @@
 
 
     def output_perf_value(self, description, value, units=None,
-                          higher_is_better=None, graph=None, replacement='_'):
+                          higher_is_better=None, graph=None,
+                          replacement='_', replace_existing_values=False):
         """
         Records a measured performance value in an output file.
 
@@ -125,6 +126,8 @@
                 value should be displayed individually on a separate graph.
         @param replacement: string to replace illegal characters in
                 |description| and |units| with.
+        @param replace_existing_values: A boolean indicating whether or not a
+                newly added perf value should replace existing perf values.
         """
         if len(description) > 256:
             raise ValueError('The description must be at most 256 characters.')
@@ -184,13 +187,16 @@
             if first_level in charts and second_level in charts[first_level]:
                 if 'values' in charts[first_level][second_level]:
                     result_value = charts[first_level][second_level]['values']
-                    result_value.extend(value)
                 elif 'value' in charts[first_level][second_level]:
                     result_value = [charts[first_level][second_level]['value']]
+                if replace_existing_values:
+                    result_value = value
+                else:
                     result_value.extend(value)
             else:
                 result_value = value
-        elif first_level in charts and second_level in charts[first_level]:
+        elif (first_level in charts and second_level in charts[first_level] and
+              not replace_existing_values):
             result_type = 'list_of_scalar_values'
             value_key = 'values'
             if 'values' in charts[first_level][second_level]:
diff --git a/client/common_lib/test_unittest.py b/client/common_lib/test_unittest.py
index abf1c44..a269207 100755
--- a/client/common_lib/test_unittest.py
+++ b/client/common_lib/test_unittest.py
@@ -398,6 +398,91 @@
                            "improvement_direction": "up"}}}
         self.assertDictEqual(expected_result, json.loads(f.read()))
 
+    def test_output_list_then_replace_list_perf_value(self):
+        self.test.resultsdir = tempfile.mkdtemp()
+        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
+                                    higher_is_better=False)
+        self.test.output_perf_value("Test", [4, 5, 6], units="ms",
+                                    higher_is_better=False,
+                                    replace_existing_values=True)
+        f = open(self.test.resultsdir + "/results-chart.json")
+        expected_result = {"Test": {"summary": {"units": "ms",
+                           "type": "list_of_scalar_values",
+                           "values": [4, 5, 6],
+                           "improvement_direction": "down"}}}
+        self.assertDictEqual(expected_result, json.loads(f.read()))
+
+    def test_output_single_then_replace_list_perf_value(self):
+        self.test.resultsdir = tempfile.mkdtemp()
+        self.test.output_perf_value("Test", 3, units="ms",
+                                    higher_is_better=False)
+        self.test.output_perf_value("Test", [4, 5, 6], units="ms",
+                                    higher_is_better=False,
+                                    replace_existing_values=True)
+        f = open(self.test.resultsdir + "/results-chart.json")
+        expected_result = {"Test": {"summary": {"units": "ms",
+                           "type": "list_of_scalar_values",
+                           "values": [4, 5, 6],
+                           "improvement_direction": "down"}}}
+        self.assertDictEqual(expected_result, json.loads(f.read()))
+
+    def test_output_list_then_replace_single_perf_value(self):
+        self.test.resultsdir = tempfile.mkdtemp()
+        self.test.output_perf_value("Test", [1,2,3], units="ms",
+                                    higher_is_better=False)
+        self.test.output_perf_value("Test", 4, units="ms",
+                                    higher_is_better=False,
+                                    replace_existing_values=True)
+        f = open(self.test.resultsdir + "/results-chart.json")
+        expected_result = {"Test": {"summary": {"units": "ms",
+                           "type": "scalar",
+                           "value": 4,
+                           "improvement_direction": "down"}}}
+        self.assertDictEqual(expected_result, json.loads(f.read()))
+
+    def test_output_single_then_replace_single_perf_value(self):
+        self.test.resultsdir = tempfile.mkdtemp()
+        self.test.output_perf_value("Test", 1, units="ms",
+                                    higher_is_better=False)
+        self.test.output_perf_value("Test", 2, units="ms",
+                                    higher_is_better=False,
+                                    replace_existing_values=True)
+        f = open(self.test.resultsdir + "/results-chart.json")
+        expected_result = {"Test": {"summary": {"units": "ms",
+                           "type": "scalar",
+                           "value": 2,
+                           "improvement_direction": "down"}}}
+        self.assertDictEqual(expected_result, json.loads(f.read()))
+
+    def test_output_perf_then_replace_certain_perf_value(self):
+        self.test.resultsdir = tempfile.mkdtemp()
+        self.test.output_perf_value("Test1", 1, units="ms",
+                                    higher_is_better=False)
+        self.test.output_perf_value("Test2", 2, units="ms",
+                                    higher_is_better=False)
+        self.test.output_perf_value("Test3", 3, units="ms",
+                                    higher_is_better=False)
+        self.test.output_perf_value("Test2", -1, units="ms",
+                                    higher_is_better=False,
+                                    replace_existing_values=True)
+        f = open(self.test.resultsdir + "/results-chart.json")
+        expected_result = {"Test1": {"summary":
+                                       {"units": "ms",
+                                        "type": "scalar",
+                                        "value": 1,
+                                        "improvement_direction": "down"}},
+                           "Test2": {"summary":
+                                       {"units": "ms",
+                                        "type": "scalar",
+                                        "value": -1,
+                                        "improvement_direction": "down"}},
+                           "Test3": {"summary":
+                                       {"units": "ms",
+                                        "type": "scalar",
+                                        "value": 3,
+                                        "improvement_direction": "down"}}}
+        self.assertDictEqual(expected_result, json.loads(f.read()))
+
     def test_chart_supplied(self):
         self.test.resultsdir = tempfile.mkdtemp()