graphics_Drm: report failure count
Report the failure count to the Chrome perf dashboard as
graphics_gfx regression alerts.
BUG=chromium:717664
TEST=test_that test graphics_Drm
test_that test graphics_Drm.bvt
Change-Id: Ic5ed79692c531bc6b3679372100e12cd4cec26a0
Reviewed-on: https://chromium-review.googlesource.com/518266
Commit-Ready: Po-Hsien Wang <pwang@chromium.org>
Tested-by: Po-Hsien Wang <pwang@chromium.org>
Reviewed-by: Ilja H. Friedel <ihf@chromium.org>
diff --git a/client/site_tests/graphics_Drm/control.bvt b/client/site_tests/graphics_Drm/control.bvt
index 3d595d7..e83fd51 100644
--- a/client/site_tests/graphics_Drm/control.bvt
+++ b/client/site_tests/graphics_Drm/control.bvt
@@ -21,7 +21,9 @@
Runs various graphics_Drm related tests.
"""
-job.run_test('graphics_Drm', tests=['drm_cursor_test',
- 'linear_bo_test',
- 'null_platform_test',
- 'swrast_test'])
+job.run_test('graphics_Drm',
+ tests=['drm_cursor_test',
+ 'linear_bo_test',
+ 'null_platform_test',
+ 'swrast_test'],
+ perf_report=True)
diff --git a/client/site_tests/graphics_Drm/graphics_Drm.py b/client/site_tests/graphics_Drm/graphics_Drm.py
index 7099d44..d327473 100644
--- a/client/site_tests/graphics_Drm/graphics_Drm.py
+++ b/client/site_tests/graphics_Drm/graphics_Drm.py
@@ -93,26 +93,24 @@
}
-class graphics_Drm(test.test):
+class graphics_Drm(graphics_utils.GraphicsTest):
"""Runs one, several or all of the drm-tests."""
version = 1
_services = None
- _GSC = None
def initialize(self):
- self._failures = []
- self._GSC = graphics_utils.GraphicsStateChecker()
+ super(graphics_Drm, self).initialize()
self._services = service_stopper.ServiceStopper(['ui'])
self._services.stop_services()
def cleanup(self):
if self._services:
self._services.restore_services()
- if self._GSC:
- self._GSC.finalize()
+ super(graphics_Drm, self).cleanup()
# graphics_Drm runs all available tests if tests = None.
- def run_once(self, tests=None):
+ def run_once(self, tests=None, perf_report=False):
+ self._test_failure_report_enable = perf_report
for test_name in drm_tests:
if tests and test_name not in tests:
continue
@@ -124,13 +122,13 @@
logging.debug('Running test %s.', test_name)
passed = test.run()
if not passed:
- self._failures.append(test_name)
+ self.add_failures(test_name)
else:
logging.info('Failed: test %s can not be run on current '
'configurations.' % test_name)
- self._failures.append(test_name)
+ self.add_failures(test_name)
else:
logging.info('Skipping test: %s.' % test_name)
- if self._failures:
- raise error.TestFail('Failed: %s' % self._failures)
+ if self.get_failures():
+ raise error.TestFail('Failed: %s' % self.get_failures())