merge in nyc-release history after reset to master
diff --git a/client/bin/site_sysinfo.py b/client/bin/site_sysinfo.py
index 7144f8d..e113fa1 100755
--- a/client/bin/site_sysinfo.py
+++ b/client/bin/site_sysinfo.py
@@ -7,7 +7,7 @@
from autotest_lib.client.common_lib import log
from autotest_lib.client.common_lib import error, utils, global_config
-from autotest_lib.client.bin import base_sysinfo
+from autotest_lib.client.bin import base_sysinfo, utils
from autotest_lib.client.cros import constants, tpm_dam
get_value = global_config.global_config.get_config_value
@@ -304,6 +304,9 @@
for log in self.diffable_loggables:
log.run(log_dir=None, collect_init_status=True)
+ # Start each log with the board name for orientation.
+ logging.info("ChromeOS BOARD = %s",
+ utils.get_board_with_frequency_and_memory())
@log.log_and_ignore_errors("post-test sysinfo error:")
def log_after_each_test(self, test):
diff --git a/client/common_lib/site_utils.py b/client/common_lib/site_utils.py
index 76f7fc2..2adcdc4 100644
--- a/client/common_lib/site_utils.py
+++ b/client/common_lib/site_utils.py
@@ -713,9 +713,30 @@
return branch, target, build_id
+def extract_wav_frames(wave_file):
+ """Extract all frames from a WAV file.
+
+    @param wave_file: A Wave_read object representing a WAV file opened for
+                      reading.
+
+ @return: A list containing the frames in the WAV file.
+ """
+ num_frames = wave_file.getnframes()
+ sample_width = wave_file.getsampwidth()
+ if sample_width == 1:
+ fmt = '%iB' # Read 1 byte.
+ elif sample_width == 2:
+ fmt = '%ih' # Read 2 bytes.
+ elif sample_width == 4:
+ fmt = '%ii' # Read 4 bytes.
+ else:
+ raise ValueError('Unsupported sample width')
+ return list(struct.unpack(fmt % num_frames * wave_file.getnchannels(),
+ wave_file.readframes(num_frames)))
+
+
def check_wav_file(filename, num_channels=None, sample_rate=None,
sample_width=None):
- """Checks a WAV file and returns its peak PCM value.
+ """Checks a WAV file and returns its peak PCM values.
@param filename: Input WAV file to analyze.
@param num_channels: Number of channels to expect (None to not check).
@@ -731,29 +752,27 @@
try:
chk_file = wave.open(filename, 'r')
if num_channels is not None and chk_file.getnchannels() != num_channels:
- raise ValueError('Incorrect number of channels')
+        raise ValueError('Expected %d channels but got %d instead.' %
+                         (num_channels, chk_file.getnchannels()))
if sample_rate is not None and chk_file.getframerate() != sample_rate:
- raise ValueError('Incorrect sample rate')
+        raise ValueError('Expected sample rate %d but got %d instead.' %
+                         (sample_rate, chk_file.getframerate()))
if sample_width is not None and chk_file.getsampwidth() != sample_width:
- raise ValueError('Incorrect sample width')
- num_frames = chk_file.getnframes()
- if chk_file.getsampwidth() == 1:
- fmt = '%iB' # Read 1 byte.
- elif chk_file.getsampwidth() == 2:
- fmt = '%ih' # Read 2 bytes.
- elif chk_file.getsampwidth() == 4:
- fmt = '%il' # Read 4 bytes.
- else:
- raise ValueError('Unsupported sample width')
- frames = struct.unpack(fmt % num_frames * chk_file.getnchannels(),
- chk_file.readframes(num_frames))
+        raise ValueError('Expected sample width %d but got %d instead.' %
+                         (sample_width, chk_file.getsampwidth()))
+ frames = extract_wav_frames(chk_file)
except wave.Error as e:
raise ValueError('Error processing WAV file: %s' % e)
finally:
if chk_file is not None:
chk_file.close()
- peaks = [];
+ # Since 8-bit PCM is unsigned with an offset of 128, we subtract the offset
+ # to make it signed since the rest of the code assumes signed numbers.
+ if chk_file.getsampwidth() == 1:
+ frames = [val - 128 for val in frames]
+
+ peaks = []
for i in range(chk_file.getnchannels()):
peaks.append(max(map(abs, frames[i::chk_file.getnchannels()])))
return peaks;
diff --git a/client/site_tests/platform_CryptohomeFio/seq_write_verified b/client/site_tests/platform_CryptohomeFio/seq_write_verified
index 7322211..dd9a122 100644
--- a/client/site_tests/platform_CryptohomeFio/seq_write_verified
+++ b/client/site_tests/platform_CryptohomeFio/seq_write_verified
@@ -24,7 +24,7 @@
; verify=crc32c-intel
verify=crc32
-verify_pattern=ecryptfs
+verify_pattern="ecryptfs"
verify_backlog=1
verifysort=1
verify_fatal=1
diff --git a/client/site_tests/platform_CryptohomeFio/surfing b/client/site_tests/platform_CryptohomeFio/surfing
index 880749b..2b3e428 100644
--- a/client/site_tests/platform_CryptohomeFio/surfing
+++ b/client/site_tests/platform_CryptohomeFio/surfing
@@ -28,6 +28,6 @@
; norandommap
; verify=crc32c-intel
verify=crc32
-verify_pattern=ecryptfs
+verify_pattern="ecryptfs"
verify_backlog=1
stonewall
diff --git a/client/site_tests/policy_ProxySettings/control b/client/site_tests/policy_ProxySettings/control
index 21dbcce..ab17300 100644
--- a/client/site_tests/policy_ProxySettings/control
+++ b/client/site_tests/policy_ProxySettings/control
@@ -15,20 +15,17 @@
Verify effects of policy_ProxySettings policy on client behavior.
This test verifies the effect of the ProxySettings user policy on Chrome OS
-client behavior and appearance. It exercises a range of policy values using
-three unique test cases, named: FixedProxy, NotSet, and TBD. See the test file
-for a full description of what each test case does.
+client behavior. It exercises a variety of policy values using three unique
+test cases: FixedProxy_UseFixedProxy, DirectProxy_UseNoProxy, and
+NotSet_UseNoProxy.
Usage example:
-$ test_that <IPAddress> policy_ProxySettings --args="mode=single
-case=FixedProxy nv=dm-test dms_name=xot-dmst
-username=test@crosprqa1.com password=test1234"
+$ test_that <IPAddress> policy_ProxySettings --args=
+'case=FixedProxy_UseFixedProxy'
-This command runs a single test case (e.g., FixedProxy) using the specified
-test DM Server environment, and signing in with the specified username and
-password.
+This command runs a single test case (e.g., FixedProxy_UseFixedProxy) using
+the default fake DM server.
"""
job.run_test("policy_ProxySettings", args=args)
-
diff --git a/client/site_tests/policy_ProxySettings/policy_ProxySettings.py b/client/site_tests/policy_ProxySettings/policy_ProxySettings.py
index d16bdfb..a4ab44d 100644
--- a/client/site_tests/policy_ProxySettings/policy_ProxySettings.py
+++ b/client/site_tests/policy_ProxySettings/policy_ProxySettings.py
@@ -12,38 +12,40 @@
POLICY_NAME = 'ProxySettings'
PROXY_HOST = 'localhost'
PROXY_PORT = 3128
-FIXED_PROXY = '''
-{
- "ProxyMode":"fixed_servers",
- "ProxyServer":"localhost:%s"
-}
-''' % str(PROXY_PORT)
+FIXED_PROXY = '''{
+ "ProxyBypassList": "www.google.com,www.googleapis.com",
+ "ProxyMode": "fixed_servers",
+ "ProxyServer": "localhost:%s"
+}''' % PROXY_PORT
+DIRECT_PROXY = '''{
+ "ProxyMode": "direct"
+}'''
+TEST_URL = 'http://www.wired.com/'
class ProxyHandler(StreamRequestHandler):
- """Provide a request handler for the Threaded Proxy Server."""
- wbufsize = -1
+ """Provide request handler for the Threaded Proxy Listener."""
def handle(self):
"""Get URL of request from first line.
- Read the first line of the request, up to 40 characters, and look for
- the URL of the request. If found, save it to the URL list.
+ Read the first line of the request, up to 40 characters, and look
+ for the URL of the request. If found, save it to the URL list.
Note: All requests are sent an HTTP 504 error.
"""
- # Read up to 40 characters of the request to capture the request URL
+ # Capture URL in first 40 chars of request.
data = self.rfile.readline(40).strip()
logging.info('ProxyHandler::handle(): <%s>', data)
self.server.store_requests_recieved(data)
- self.wfile.write('HTTP/1.1 504 Gateway Timeout\r\n' +
+ self.wfile.write('HTTP/1.1 504 Gateway Timeout\r\n'
'Connection: close\r\n\r\n')
class ThreadedProxyServer(ThreadingTCPServer):
- """Provide a Threaded TCP Server to service and save requests.
+ """Provide a Threaded Proxy Server to service and save requests.
- Define a Threaded TCP Server which services requests, and allows the
+ Define a Threaded Proxy Server which services requests, and allows the
handler to save all requests.
"""
@@ -53,7 +55,7 @@
@param server_address: tuple of server IP and port to listen on.
@param HandlerClass: the RequestHandler class to instantiate per req.
"""
- self._requests_recieved = []
+ self.reset_requests_received()
ThreadingTCPServer.__init__(self, server_address, HandlerClass)
def store_requests_recieved(self, request):
@@ -67,15 +69,17 @@
"""Get list of received requests."""
return self._requests_recieved
- # TODO(krishnargv) add a method to reset request_recieved_stack
+ def reset_requests_received(self):
+ """Clear list of received requests."""
+ self._requests_recieved = []
class ProxyListener(object):
"""Provide a Proxy Listener to detect connect requests.
- Defines fake listener for tracking whether an expected CONNECT request is
- seen at the provided server address. Any requests recieved are exposed to
- be consumed by the caller.
+ Define a proxy listener to detect when a CONNECT request is seen at the
+ given |server_address|, and record all requests received. Requests
+    received are exposed to the caller.
"""
def __init__(self, server_address):
@@ -87,7 +91,7 @@
self._thread = threading.Thread(target=self._server.serve_forever)
def run(self):
- """Run the server on a thread."""
+ """Start the server by activating it's thread."""
self._thread.start()
def stop(self):
@@ -96,18 +100,40 @@
self._server.socket.close()
self._thread.join()
+ def store_requests_recieved(self, request):
+ """Add receieved request to list.
+
+ @param request: request received by the proxy server.
+ """
+ self._requests_recieved.append(request)
+
def get_requests_recieved(self):
+ """Get list of received requests."""
return self._server.get_requests_recieved()
+ def reset_requests_received(self):
+ """Clear list of received requests."""
+ self._server.reset_requests_received()
+
class policy_ProxySettings(enterprise_policy_base.EnterprisePolicyTest):
- """Test effect of ProxySettings policy on Chrome OS behavior."""
+ """Test effect of ProxySettings policy on Chrome OS behavior.
+
+ This test verifies the behavior of Chrome OS for specific configurations
+ of the ProxySettings use policy: None (undefined), ProxyMode=direct,
+ ProxyMode=fixed_servers. None means that the policy value is not set. This
+ induces the default behavior, equivalent to what is seen by an un-managed
+ user.
+
+ When ProxySettings is None (undefined), or ProxyMode=direct, then no proxy
+ server should be used. When ProxyMode=fixed_servers, then the proxy server
+ address specified by the ProxyServer entry should be used.
+ """
version = 1
TEST_CASES = {
- 'FixedProxy': '1'
- }
- TEST_CASE_DATA = {
- 'FixedProxy': FIXED_PROXY
+ 'FixedProxy_UseFixedProxy': FIXED_PROXY,
+ 'DirectProxy_UseNoProxy': DIRECT_PROXY,
+ 'NotSet_UseNoProxy': None
}
def initialize(self, args=()):
@@ -119,56 +145,57 @@
self._proxy_server.stop()
super(policy_ProxySettings, self).cleanup()
- def test_fixed_proxy(self, policy_value, policies_json):
- """Verify CrOS enforces ProxySettings value = fixed-proxy.
+ def _test_proxy_configuration(self, policy_value, policies_json):
+ """Verify CrOS enforces the specified ProxySettings configuration.
@param policy_value: policy value expected on chrome://policy page.
@param policies_json: policy JSON data to send to the fake DM server.
"""
- proxy_server_requests = []
- matching_requests = []
- url = 'http://www.wired.com/'
-
+ logging.info('Running _test_proxy_configuration(%s, %s)',
+ policy_value, policies_json)
self.setup_case(POLICY_NAME, policy_value, policies_json)
- tab = self.cr.browser.tabs.New()
- logging.info('Navigating to URL:%s', url)
- tab.Navigate(url, timeout=10)
- proxy_server_requests = self._proxy_server.get_requests_recieved()
- matching_requests = [request for request in proxy_server_requests
- if url in request]
- if not matching_requests:
- raise error.TestFail('Fixed Proxy Policy not applied')
- def _run_test_case(self, case):
+ self._proxy_server.reset_requests_received()
+ self.navigate_to_url(TEST_URL)
+ proxied_requests = self._proxy_server.get_requests_recieved()
+
+ # Determine whether TEST_URL is in |proxied_requests|. Comprehension
+ # is conceptually equivalent to `TEST_URL in proxied_requests`;
+ # however, we must do partial matching since TEST_URL and the
+ # elements inside |proxied_requests| are not necessarily equal, i.e.,
+ # TEST_URL is a substring of the received request.
+ matching_requests = [request for request in proxied_requests
+ if TEST_URL in request]
+ logging.info('matching_requests: %s', matching_requests)
+
+ if policy_value is None or 'direct' in policy_value:
+ if matching_requests:
+ raise error.TestFail('Requests should not have been sent '
+ 'through the proxy server.')
+ elif 'fixed_servers' in policy_value:
+ if not matching_requests:
+ raise error.TestFail('Requests should have been sent '
+ 'through the proxy server.')
+
+ def run_test_case(self, case):
"""Setup and run the test configured for the specified test case.
Set the expected |policy_value| and |policies_json| data based on the
- test |case|. If the user specified an expected |value| in the command
- line args, then use it to set the |policy_value| and blank out the
- |policies_json|.
+ test |case|. If the user gave an expected |value| on the command line,
+ then set |policy_value| to |value|, and |policies_json| to None.
@param case: Name of the test case to run.
"""
- policy_value = None
- policies_json = None
-
if self.is_value_given:
- # If |value| was given i the command line args, then set expected
+ # If |value| was given in the command line args, then set expected
# |policy_value| to the given value, and |policies_json| to None.
policy_value = self.value
policies_json = None
else:
# Otherwise, set expected |policy_value| and setup |policies_json|
# data to the values required by the specified test |case|.
- if not self.TEST_CASES[case]:
- policy_value = None
- else:
- policy_value = self.TEST_CASE_DATA[case]
- policies_json = {POLICY_NAME: self.TEST_CASE_DATA[case]}
+ policy_value = self.TEST_CASES[case]
+ policies_json = {POLICY_NAME: self.TEST_CASES[case]}
- if case == 'FixedProxy':
- self.test_fixed_proxy(policy_value, policies_json)
-
- def run_once(self):
- self.run_once_impl(self._run_test_case)
+ self._test_proxy_configuration(policy_value, policies_json)
diff --git a/CODING_STYLE b/docs/coding-style.md
similarity index 100%
rename from CODING_STYLE
rename to docs/coding-style.md
diff --git a/frontend/afe/site_rpc_interface.py b/frontend/afe/site_rpc_interface.py
index 074e438..5b625a5 100644
--- a/frontend/afe/site_rpc_interface.py
+++ b/frontend/afe/site_rpc_interface.py
@@ -574,15 +574,18 @@
@rpc_utils.route_rpc_to_master
-def get_stable_version(board=stable_version_utils.DEFAULT):
+def get_stable_version(board=stable_version_utils.DEFAULT, android=False):
"""Get stable version for the given board.
@param board: Name of the board.
+ @param android: If True, the given board is an Android-based device. If
+        False, assume it's a Chrome OS-based device.
+
@return: Stable version of the given board. Return global configure value
of CROS.stable_cros_version if stable_versinos table does not have
entry of board DEFAULT.
"""
- return stable_version_utils.get(board)
+ return stable_version_utils.get(board=board, android=android)
@rpc_utils.route_rpc_to_master
diff --git a/global_config.ini b/global_config.ini
index c3898b4..5d609e7 100644
--- a/global_config.ini
+++ b/global_config.ini
@@ -381,3 +381,4 @@
[ANDROID]
image_url_pattern: %s/static/%s
+stable_version_dragonboard: git_mnc-brillo-dev/dragonboard-userdebug/2512766
diff --git a/server/afe_utils.py b/server/afe_utils.py
index d609838..1f13c0e 100644
--- a/server/afe_utils.py
+++ b/server/afe_utils.py
@@ -2,7 +2,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""Utility functions for AFE-based interactions."""
+"""Utility functions for AFE-based interactions.
+
+NOTE: This module should only be used in the context of a running test. Any
+ utilities that require accessing the AFE, should do so by creating
+ their own instance of the AFE client and interact with it directly.
+"""
import common
from autotest_lib.server import utils
@@ -23,7 +28,7 @@
@returns The host model object.
"""
- if not host.job.in_lab:
+ if not host.job or not host.job.in_lab:
return False
return AFE.get_hosts(hostname=host.hostname)
@@ -43,6 +48,20 @@
return utils.get_build_from_afe(host.hostname, AFE)
+def get_board(host):
+ """Retrieve the board for a given host from the AFE.
+
+ Contacts the AFE to retrieve the board for a given host.
+
+ @param host: Host object to get board.
+
+ @returns The current board or None if it could not find it.
+ """
+ if not host_in_lab(host):
+ return None
+ return utils.get_board_from_afe(host.hostname, AFE)
+
+
def clear_version_labels(host):
"""Clear version labels for a given host.
@@ -82,3 +101,17 @@
clear_version_labels(host)
image_name = host.machine_install(*args, **dargs)
add_version_label(host, image_name)
+
+
+def get_stable_version(board, android=False):
+ """Retrieves a board's stable version from the AFE.
+
+ @param board: Board to lookup.
+ @param android: If True, indicates we are looking up a Android/Brillo-based
+ board. There is no default version that works for all
+ Android/Brillo boards. If False, we are looking up a Chrome
+ OS based board.
+
+ @returns Stable version of the given board.
+ """
+ return AFE.run('get_stable_version', board=board, android=android)
diff --git a/server/brillo/feedback/closed_loop_audio_client.py b/server/brillo/feedback/closed_loop_audio_client.py
index 9f75f0b..c1f71bb 100644
--- a/server/brillo/feedback/closed_loop_audio_client.py
+++ b/server/brillo/feedback/closed_loop_audio_client.py
@@ -5,8 +5,10 @@
"""Feedback implementation for audio with closed-loop cable."""
import logging
+import numpy
import os
import tempfile
+import wave
import common
from autotest_lib.client.common_lib import error
@@ -15,16 +17,6 @@
from autotest_lib.server.brillo import host_utils
-def _max_volume(sample_width):
- """Returns the maximum possible volume.
-
- This is the highest absolute value of a signed integer of a given width.
-
- @param sample_width: The sample width in bytes.
- """
- return (1 << (sample_width * 8 - 1))
-
-
# Constants used for updating the audio policy.
#
_DUT_AUDIO_POLICY_PATH = 'system/etc/audio_policy.conf'
@@ -38,14 +30,40 @@
#
_REC_FILENAME = 'rec_file.wav'
_REC_DURATION = 10
+
# Number of channels to record.
-_NUM_CHANNELS = 1
+_DEFAULT_NUM_CHANNELS = 1
# Recording sample rate (48kHz).
-_SAMPLE_RATE = 48000
+_DEFAULT_SAMPLE_RATE = 48000
# Recording sample format is signed 16-bit PCM (two bytes).
-_SAMPLE_WIDTH = 2
+_DEFAULT_SAMPLE_WIDTH = 2
+
# The peak when recording silence is 5% of the max volume.
-_SILENCE_MAX = _max_volume(_SAMPLE_WIDTH) / 20
+_SILENCE_THRESHOLD = 0.05
+
+# Thresholds used when comparing files.
+#
+# The frequency threshold used when comparing files. The frequency of the
+# recorded audio has to be within _FREQUENCY_THRESHOLD percent of the frequency
+# of the original audio.
+_FREQUENCY_THRESHOLD = 0.01
+# Noise threshold controls how much noise is allowed as a fraction of the
+# magnitude of the peak frequency after taking an FFT. The power of all the
+# other frequencies in the signal should be within _FFT_NOISE_THRESHOLD percent
+# of the power of the main frequency.
+_FFT_NOISE_THRESHOLD = 0.05
+
+
+def _max_volume(sample_width):
+ """Returns the maximum possible volume.
+
+ This is the highest absolute value of an integer of a given width.
+    If the sample width is one, then we assume an unsigned integer. For all
+    other
+ sample sizes, we assume that the format is signed.
+
+ @param sample_width: The sample width in bytes.
+ """
+ return (1 << 8) if sample_width == 1 else (1 << (sample_width * 8 - 1))
class Client(client.Client):
@@ -65,8 +83,6 @@
self.dut_tmp_dir = None
self.tmp_dir = None
self.orig_policy = None
- # By default, the audible threshold is equivalent to the silence cap.
- self.audible_threshold = _SILENCE_MAX
def set_audible_threshold(self, threshold):
@@ -107,7 +123,8 @@
policy_changed = True
if _WIRED_HEADSET_IN not in line:
if _AUDIO_POLICY_ATTACHED_INPUT_DEVICES in line:
- line = '%s|%s\n' % (line.rstrip(), _WIRED_HEADSET_IN)
+ line = '%s|%s\n' % (line.rstrip(),
+ _WIRED_HEADSET_IN)
policy_changed = True
test_file.write(line)
@@ -185,13 +202,12 @@
self.recording_pid = None
- def _process_recording(self):
- """Waits for recording to finish and processes the result.
+ def _get_local_rec_filename(self):
+ """Waits for recording to finish and copies the file to the host.
- @return A list of the highest recorded peak value for each channel.
+ @return A string of the local filename containing the recorded audio.
@raise error.TestError: Error while validating the recording.
- @raise error.TestFail: Recording file failed to validate.
"""
# Wait for recording to finish.
timeout = _REC_DURATION + 5
@@ -202,29 +218,38 @@
_, local_rec_filename = tempfile.mkstemp(
prefix='recording-', suffix='.wav', dir=self.local_tmp_dir)
- try:
- self.client.host.get_file(self.dut_rec_filename,
- local_rec_filename, delete_dest=True)
- return site_utils.check_wav_file(local_rec_filename,
- num_channels=_NUM_CHANNELS,
- sample_rate=_SAMPLE_RATE,
- sample_width=_SAMPLE_WIDTH)
- except ValueError as e:
- raise error.TestFail('Invalid file attributes: %s' % e)
+ self.client.host.get_file(self.dut_rec_filename,
+ local_rec_filename, delete_dest=True)
+ return local_rec_filename
# Implementation overrides.
#
- def _prepare_impl(self):
- """Implementation of query preparation logic."""
+ def _prepare_impl(self,
+ sample_width=_DEFAULT_SAMPLE_WIDTH,
+ sample_rate=_DEFAULT_SAMPLE_RATE,
+ num_channels=_DEFAULT_NUM_CHANNELS,
+ duration_secs=_REC_DURATION):
+ """Implementation of query preparation logic.
+
+        @param sample_width: Sample width to record at.
+        @param sample_rate: Sample rate to record at.
+        @param num_channels: Number of channels to record at.
+        @param duration_secs: Duration (in seconds) to record for.
+ """
+ self.num_channels = num_channels
+ self.sample_rate = sample_rate
+ self.sample_width = sample_width
self.dut_rec_filename = os.path.join(self.client.dut_tmp_dir,
_REC_FILENAME)
self.local_tmp_dir = tempfile.mkdtemp(dir=self.client.tmp_dir)
# Trigger recording in the background.
# TODO(garnold) Remove 'su root' once b/25663983 is resolved.
- cmd = ('su root slesTest_recBuffQueue -d%d %s' %
- (_REC_DURATION, self.dut_rec_filename))
+ cmd = ('su root slesTest_recBuffQueue -c%d -d%d -r%d -%d %s' %
+ (num_channels, duration_secs, sample_rate, sample_width,
+ self.dut_rec_filename))
+ logging.info("Recording cmd: %s", cmd)
self.recording_pid = host_utils.run_in_background(self.client.host, cmd)
@@ -239,19 +264,29 @@
#
def _validate_impl(self):
"""Implementation of query validation logic."""
- silence_peaks = self._process_recording()
+ local_rec_filename = self._get_local_rec_filename()
+ try:
+ silence_peaks = site_utils.check_wav_file(
+ local_rec_filename,
+ num_channels=self.num_channels,
+ sample_rate=self.sample_rate,
+ sample_width=self.sample_width)
+ except ValueError as e:
+ raise error.TestFail('Invalid file attributes: %s' % e)
+
silence_peak = max(silence_peaks)
# Fail if the silence peak volume exceeds the maximum allowed.
- if silence_peak > _SILENCE_MAX:
- logging.error('Silence peak level (%d) exceeds the max allowed (%d)',
- silence_peak, _SILENCE_MAX)
+ max_vol = _max_volume(self.sample_width) * _SILENCE_THRESHOLD
+ if silence_peak > max_vol:
+ logging.error('Silence peak level (%d) exceeds the max allowed '
+ '(%d)', silence_peak, max_vol)
raise error.TestFail('Environment is too noisy')
# Update the client audible threshold, if so instructed.
audible_threshold = silence_peak * 15
logging.info('Silent peak level (%d) is below the max allowed (%d); '
'setting audible threshold to %d',
- silence_peak, _SILENCE_MAX, audible_threshold)
+ silence_peak, max_vol, audible_threshold)
self.client.set_audible_threshold(audible_threshold)
@@ -262,16 +297,18 @@
super(AudiblePlaybackAudioQuery, self).__init__(client)
- # Implementation overrides.
- #
- def _validate_impl(self, audio_file=None):
- """Implementation of query validation logic."""
- # TODO(garnold) This currently ignores the audio_file argument entirely
- # and just ensures that peak levels look reasonable. We should probably
- # compare actual audio content.
+ def _check_peaks(self):
+ """Ensure that peak recording volume exceeds the threshold."""
+ local_rec_filename = self._get_local_rec_filename()
+ try:
+ audible_peaks = site_utils.check_wav_file(
+ local_rec_filename,
+ num_channels=self.num_channels,
+ sample_rate=self.sample_rate,
+ sample_width=self.sample_width)
+ except ValueError as e:
+ raise error.TestFail('Invalid file attributes: %s' % e)
- # Ensure that peak recording volume exceeds the threshold.
- audible_peaks = self._process_recording()
min_channel, min_audible_peak = min(enumerate(audible_peaks),
key=lambda p: p[1])
if min_audible_peak < self.client.audible_threshold:
@@ -289,6 +326,109 @@
min_audible_peak, self.client.audible_threshold)
+ def _is_outside_frequency_threshold(self, freq_golden, freq_rec):
+ """Compares the frequency of the recorded audio with the golden audio.
+
+ This function checks to see if the frequencies corresponding to the peak
+    FFT values are similar meaning that the dominant frequency in the audio
+ signal is the same for the recorded audio as that in the audio played.
+
+    @param freq_golden: The dominant frequency in the reference audio file.
+    @param freq_rec: The dominant frequency in the recorded audio file.
+
+    @returns: True if freq_rec is within _FREQUENCY_THRESHOLD percent of
+ freq_golden.
+ """
+ ratio = float(freq_rec) / freq_golden
+ if ratio > 1 + _FREQUENCY_THRESHOLD or ratio < 1 - _FREQUENCY_THRESHOLD:
+ return True
+ return False
+
+
+ def _compare_file(self, audio_file):
+ """Compares the recorded audio file to the golden audio file.
+
+ This method checks for two things:
+ 1. That the main frequency is the same in both the files. This is done
+ using the FFT and observing the frequency corresponding to the
+ peak.
+ 2. That there is no other dominant frequency in the recorded file.
+ This is done by sweeping the frequency domain and checking that the
+ frequency is always less than _FFT_NOISE_THRESHOLD percentage of
+ the peak.
+
+ The key assumption here is that the reference audio file contains only
+ one frequency.
+
+ @param audio_file: Reference audio file containing the golden signal.
+
+ @raise error.TestFail: The frequency of the recorded signal doesn't
+ match that of the golden signal.
+ @raise error.TestFail: There is too much noise in the recorded signal.
+ """
+ local_rec_filename = self._get_local_rec_filename()
+
+ # Open both files and extract data.
+ golden_file = wave.open(audio_file, 'rb')
+ golden_file_frames = site_utils.extract_wav_frames(golden_file)
+ rec_file = wave.open(local_rec_filename, 'rb')
+ rec_file_frames = site_utils.extract_wav_frames(rec_file)
+
+ num_channels = golden_file.getnchannels()
+ for channel in range(num_channels):
+ golden_data = golden_file_frames[channel::num_channels]
+ rec_data = rec_file_frames[channel::num_channels]
+
+ # Get fft and frequencies corresponding to the fft values.
+ fft_golden = numpy.fft.rfft(golden_data)
+ fft_rec = numpy.fft.rfft(rec_data)
+ fft_freqs_golden = numpy.fft.rfftfreq(
+ len(golden_data), 1.0 / golden_file.getframerate())
+ fft_freqs_rec = numpy.fft.rfftfreq(len(rec_data),
+ 1.0 / rec_file.getframerate())
+
+ # Get frequency at highest peak.
+ freq_golden = fft_freqs_golden[numpy.argmax(numpy.abs(fft_golden))]
+ abs_fft_rec = numpy.abs(fft_rec)
+ freq_rec = fft_freqs_rec[numpy.argmax(abs_fft_rec)]
+
+ # Compare the two frequencies.
+ logging.info('Golden frequency = %f', freq_golden)
+ logging.info('Recorded frequency = %f', freq_rec)
+ if self._is_outside_frequency_threshold(freq_golden, freq_rec):
+ raise error.TestFail('The recorded audio frequency does not '
+ 'match that of the audio played.')
+
+ # Check for noise in the frequency domain.
+ fft_rec_peak_val = numpy.max(abs_fft_rec)
+ noise_detected = False
+ for fft_index, fft_val in enumerate(abs_fft_rec):
+ if self._is_outside_frequency_threshold(freq_golden, freq_rec):
+ # If the frequency exceeds _FFT_NOISE_THRESHOLD, then fail
+ # the test.
+ if fft_val > _FFT_NOISE_THRESHOLD * fft_rec_peak_val:
+ logging.warning('Unexpected frequency peak detected at '
+ '%f Hz.', fft_freqs_rec[fft_index])
+ noise_detected = True
+
+ if noise_detected:
+            raise error.TestFail('Signal is noisier than expected.')
+
+
+ # Implementation overrides.
+ #
+ def _validate_impl(self, audio_file=None):
+ """Implementation of query validation logic.
+
+        @param audio_file: File to compare recorded audio to.
+ """
+ self._check_peaks()
+ # If the reference audio file is available, then perform an additional
+ # check.
+ if audio_file:
+ self._compare_file(audio_file)
+
+
class RecordingAudioQuery(client.InputQuery):
"""Implementation of a recording query."""
@@ -297,7 +437,7 @@
self.client = client
- def _prepare_impl(self):
+ def _prepare_impl(self, **kwargs):
"""Implementation of query preparation logic (no-op)."""
pass
diff --git a/server/control_segments/repair b/server/control_segments/repair
index da379f6..5797786 100644
--- a/server/control_segments/repair
+++ b/server/control_segments/repair
@@ -18,9 +18,9 @@
target = hosts.create_target_machine(machine, initialize=False,
auto_monitor=False,
try_lab_servo=True)
- # We don't need to collect logs or crash info if we're a testbed since
- # they're not applicable (yet).
- if not utils.machine_is_testbed(machine):
+ # We don't need to collect logs or crash info if we're an ADBHost or
+ # testbed since they're not applicable (yet).
+ if isinstance(target, hosts.CrosHost):
# Collect logs before the repair, as it might destroy all
# useful logs.
local_log_dir = os.path.join(job.resultdir, hostname,
diff --git a/server/cros/servo/pd_console.py b/server/cros/servo/pd_console.py
index 95838f0..6885d32 100644
--- a/server/cros/servo/pd_console.py
+++ b/server/cros/servo/pd_console.py
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import re
+import logging
import time
from autotest_lib.client.common_lib import error
@@ -23,6 +24,7 @@
SRC_DISC = 'SRC_DISCONNECTED'
SNK_DISC = 'SNK_DISCONNECTED'
PD_MAX_PORTS = 2
+ CONNECT_TIME = 4
# dualrole input/ouput values
DUALROLE_QUERY_DELAY = 0.25
@@ -270,3 +272,78 @@
state = self.get_pd_state(port)
return bool(state == self.SRC_CONNECT or state == self.SNK_CONNECT)
+ def is_pd_dual_role_enabled(self):
+ """Check if a PD device is in dualrole mode
+
+        @returns True if dualrole mode is active, False otherwise
+ """
+ drp = self.get_pd_dualrole()
+ return bool(drp == self.dualrole_resp[self.dual_index['on']])
+
+
+class PDConnectionUtils(PDConsoleUtils):
+ """Provides a set of methods common to USB PD FAFT tests
+
+ This Class is used for PD utility methods that require access
+ to both Plankton and DUT PD consoles.
+
+ """
+
+ def __init__(self, dut_console, plankton_console):
+ """
+ @param dut_console: PD console object for DUT
+ @param plankton_console: PD console object for Plankton
+ """
+ # save console for DUT PD UART access functions
+ self.dut_console = dut_console
+ # save console for Plankton UART access functions
+ self.plankton_console = plankton_console
+ super(PDConnectionUtils, self).__init__(dut_console)
+
+ def _verify_plankton_connection(self, port):
+ """Verify DUT to Plankton PD connection
+
+ This method checks for a Plankton PD connection for the
+ given port by first verifying if a PD connection is present.
+ If found, then it uses a Plankton feature to force a PD disconnect.
+ If the port is no longer in the connected state, and following
+ a delay, is found to be back in the connected state, then
+ a DUT pd to Plankton connection is verified.
+
+ @param port: DUT pd port to test
+
+ @returns True if DUT to Plankton pd connection is verified
+ """
+ DISCONNECT_CHECK_TIME = 0.5
+ DISCONNECT_TIME_SEC = 2
+ # plankton console command to force PD disconnect
+ disc_cmd = 'fake_disconnect 100 %d' % (DISCONNECT_TIME_SEC * 1000)
+ # Only check for Plankton if DUT has active PD connection
+ if self.dut_console.is_pd_connected(port):
+ # Attempt to force PD disconnection
+ self.plankton_console.send_pd_command(disc_cmd)
+ time.sleep(DISCONNECT_CHECK_TIME)
+ # Verify that DUT PD port is no longer connected
+ if self.dut_console.is_pd_connected(port) == False:
+ # Wait for disconnect timer and give time to reconnect
+ time.sleep(self.dut_console.CONNECT_TIME + DISCONNECT_TIME_SEC)
+ if self.dut_console.is_pd_connected(port):
+                logging.info('Plankton connection verified on port %d', port)
+ return True
+ else:
+ # Could have disconnected other port, allow it to reconnect
+ # before exiting.
+ time.sleep(self.dut_console.CONNECT_TIME + DISCONNECT_TIME_SEC)
+ return False
+
+ def find_dut_to_plankton_connection(self):
+ """Find the PD port which is connected to Plankton
+
+ @returns DUT pd port number if found, None otherwise
+ """
+ for port in xrange(self.dut_console.PD_MAX_PORTS):
+ # Check for DUT to Plankton connection on port
+ if self._verify_plankton_connection(port):
+ # Plankton PD connection found so exit
+ return port
+ return None
diff --git a/server/hosts/__init__.py b/server/hosts/__init__.py
index e16dc5d1a..5887b15 100644
--- a/server/hosts/__init__.py
+++ b/server/hosts/__init__.py
@@ -20,6 +20,7 @@
from cros_host import CrosHost
from chameleon_host import ChameleonHost
from servo_host import ServoHost
+from testbed import TestBed
# bootloader classes
from bootloader import Bootloader
diff --git a/server/hosts/adb_host.py b/server/hosts/adb_host.py
index 4df58af..8503c57 100644
--- a/server/hosts/adb_host.py
+++ b/server/hosts/adb_host.py
@@ -202,7 +202,14 @@
msg += ', fastboot serial: %s' % self.fastboot_serial
logging.debug(msg)
- self._reset_adbd_connection()
+ # Try resetting the ADB daemon on the device, however if we are
+ # creating the host to do a repair job, the device may be inaccessible
+ # via ADB.
+ try:
+ self._reset_adbd_connection()
+ except (error.AutotestHostRunError, error.AutoservRunError) as e:
+ logging.error('Unable to reset the device adb daemon connection: '
+ '%s.', e)
self._os_type = None
@@ -664,6 +671,22 @@
return
+ def repair(self):
+ """Attempt to get the DUT to pass `self.verify()`."""
+ try:
+ self.ensure_adb_mode(timeout=30)
+ return
+ except error.AutoservError as e:
+ logging.error(e)
+ logging.debug('Verifying the device is accessible via fastboot.')
+ self.ensure_bootloader_mode()
+ if not self.job.run_test(
+ 'provision_AndroidUpdate', host=self, value=None,
+ force=True, repair=True):
+ raise error.AutoservRepairTotalFailure(
+ 'Unable to repair the device.')
+
+
def send_file(self, source, dest, delete_dest=False,
preserve_symlinks=False):
"""Copy files from the drone to the device.
@@ -1059,7 +1082,7 @@
raise
- def stage_build_for_install(self, build_name):
+ def stage_build_for_install(self, build_name, os_type=None):
"""Stage a build on a devserver and return the build_url and devserver.
@param build_name: a name like git-master/shamu-userdebug/2040953
@@ -1068,12 +1091,13 @@
http://172.22.50.122:8080/git-master/shamu-userdebug/2040953
and the devserver instance.
"""
+ os_type = os_type or self.get_os_type()
logging.info('Staging build for installation: %s', build_name)
devserver = dev_server.AndroidBuildServer.resolve(build_name,
self.hostname)
build_name = devserver.translate(build_name)
branch, target, build_id = utils.parse_android_build(build_name)
- is_brillo = self.get_os_type() == OS_TYPE_BRILLO
+ is_brillo = os_type == OS_TYPE_BRILLO
devserver.trigger_download(target, build_id, branch, is_brillo,
synchronous=False)
return '%s/static/%s' % (devserver.url(), build_name), devserver
@@ -1195,7 +1219,7 @@
def machine_install(self, build_url=None, build_local_path=None, wipe=True,
- flash_all=False):
+ flash_all=False, os_type=None):
"""Install the DUT.
@param build_url: The url to use for downloading Android artifacts.
@@ -1210,14 +1234,15 @@
@returns Name of the image installed.
"""
+ os_type = os_type or self.get_os_type()
if not build_url and self._parser.options.image:
build_url, _ = self.stage_build_for_install(
- self._parser.options.image)
- if self.get_os_type() == OS_TYPE_ANDROID:
+ self._parser.options.image, os_type=os_type)
+ if os_type == OS_TYPE_ANDROID:
self.install_android(
build_url=build_url, build_local_path=build_local_path,
wipe=wipe, flash_all=flash_all)
- elif self.get_os_type() == OS_TYPE_BRILLO:
+ elif os_type == OS_TYPE_BRILLO:
self.install_brillo(
build_url=build_url, build_local_path=build_local_path)
else:
diff --git a/server/hosts/cros_host.py b/server/hosts/cros_host.py
index 48c6dd0..0ea3421 100644
--- a/server/hosts/cros_host.py
+++ b/server/hosts/cros_host.py
@@ -365,7 +365,7 @@
'cannot be repaired.')
if image_type != 'cros':
board = '%s/%s' % (board, image_type)
- stable_version = self._AFE.run('get_stable_version', board=board)
+ stable_version = afe_utils.get_stable_version(board=board)
if image_type == 'cros':
build_pattern = CONFIG.get_config_value(
'CROS', 'stable_build_pattern')
@@ -2405,6 +2405,7 @@
version_string = self.run(client_constants.CHROME_VERSION_COMMAND).stdout
return utils.parse_chrome_version(version_string)
+
@label_decorator()
def get_board(self):
"""Determine the correct board label for this host.
@@ -2413,14 +2414,8 @@
"""
release_info = utils.parse_cmd_output('cat /etc/lsb-release',
run_method=self.run)
- board = release_info['CHROMEOS_RELEASE_BOARD']
- # Devices in the lab generally have the correct board name but our own
- # development devices have {board_name}-signed-{key_type}. The board
- # name may also begin with 'x86-' which we need to keep.
- board_format_string = ds_constants.BOARD_PREFIX + '%s'
- if 'x86' not in board:
- return board_format_string % board.split('-')[0]
- return board_format_string % '-'.join(board.split('-')[0:2])
+ return (ds_constants.BOARD_PREFIX +
+ release_info['CHROMEOS_RELEASE_BOARD'])
@label_decorator('lightsensor')
diff --git a/server/hosts/factory.py b/server/hosts/factory.py
index 458f77e..b820933 100644
--- a/server/hosts/factory.py
+++ b/server/hosts/factory.py
@@ -84,14 +84,15 @@
return cros_host.CrosHost
-def _choose_connectivity_class(hostname):
+def _choose_connectivity_class(hostname, ssh_port):
"""Choose a connectivity class for this hostname.
@param hostname: hostname that we need a connectivity class for.
+ @param ssh_port: SSH port to connect to the host.
@returns a connectivity host class.
"""
- if hostname == 'localhost':
+ if (hostname == 'localhost' and ssh_port == DEFAULT_SSH_PORT):
return local_host.LocalHost
# by default assume we're using SSH support
elif SSH_ENGINE == 'paramiko':
@@ -137,13 +138,14 @@
ssh_user, ssh_pass, ssh_port, ssh_verbosity_flag, ssh_options = \
_get_host_arguments()
- hostname, args['user'], args['password'], args['port'] = \
+ hostname, args['user'], args['password'], ssh_port = \
server_utils.parse_machine(hostname, ssh_user, ssh_pass, ssh_port)
args['ssh_verbosity_flag'] = ssh_verbosity_flag
args['ssh_options'] = ssh_options
+ args['port'] = ssh_port
if not connectivity_class:
- connectivity_class = _choose_connectivity_class(hostname)
+ connectivity_class = _choose_connectivity_class(hostname, ssh_port)
host_attributes = args.get('host_attributes', {})
host_class = host_class or OS_HOST_DICT.get(host_attributes.get('os_type'))
if host_class:
diff --git a/server/server_job.py b/server/server_job.py
index 0867405..9ea0b60 100644
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -1140,6 +1140,7 @@
namespace['autotest'].Autotest.job = self
# server.hosts.base_classes.Host uses .job.
namespace['hosts'].Host.job = self
+ namespace['hosts'].TestBed.job = self
namespace['hosts'].factory.ssh_user = self._ssh_user
namespace['hosts'].factory.ssh_port = self._ssh_port
namespace['hosts'].factory.ssh_pass = self._ssh_pass
diff --git a/server/site_tests/audio_AudioBasicHDMI/control b/server/site_tests/audio_AudioBasicHDMI/control
index 481f88a..63f0fc4 100644
--- a/server/site_tests/audio_AudioBasicHDMI/control
+++ b/server/site_tests/audio_AudioBasicHDMI/control
@@ -14,7 +14,7 @@
TEST_CATEGORY = "Functional"
TEST_CLASS = "audio"
TEST_TYPE = "server"
-DEPENDENCIES = 'chameleon, audio_board'
+DEPENDENCIES = 'chameleon:hdmi, audio_board'
JOB_RETRIES = 2
DOC = """
diff --git a/server/site_tests/brillo_Minijail/brillo_Minijail.py b/server/site_tests/brillo_Minijail/brillo_Minijail.py
new file mode 100644
index 0000000..dec854f
--- /dev/null
+++ b/server/site_tests/brillo_Minijail/brillo_Minijail.py
@@ -0,0 +1,26 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import common
+from autotest_lib.client.common_lib import error
+from autotest_lib.server import test
+
+
+class brillo_Minijail(test.test):
+ """Test Minijail sandboxing functionality."""
+ version = 1
+
+ TEST_EXECUTABLE = 'libminijail_test'
+
+ def run_once(self, host=None):
+ """Runs the test.
+
+ @param host: A host object representing the DUT.
+
+ @raise TestFail: The test executable returned an error.
+ """
+ try:
+ host.run(self.TEST_EXECUTABLE)
+ except error.AutoservRunError as are:
+ raise error.TestFail(are)
diff --git a/server/site_tests/brillo_Minijail/control b/server/site_tests/brillo_Minijail/control
new file mode 100644
index 0000000..0f7630d
--- /dev/null
+++ b/server/site_tests/brillo_Minijail/control
@@ -0,0 +1,25 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+
+from autotest_lib.client.common_lib import utils
+
+
+AUTHOR = 'jorgelo'
+NAME = 'brillo_Minijail'
+TIME = 'SHORT'
+TEST_CATEGORY = 'Functional'
+TEST_TYPE = 'Server'
+ATTRIBUTES = 'suite:brillo-bvt,suite:brillo-smoke'
+SUITE = 'brillo-bvt'
+
+DOC = """
+Tests basic Minijail sandboxing functionality.
+"""
+
+def run(machine):
+ job.run_test('brillo_Minijail', host=hosts.create_host(machine))
+
+
+parallel_simple(run, machines)
diff --git a/server/site_tests/brillo_PlaybackAudioTest/brillo_PlaybackAudioTest.py b/server/site_tests/brillo_PlaybackAudioTest/brillo_PlaybackAudioTest.py
index 4fb4d3a..20ac24d 100644
--- a/server/site_tests/brillo_PlaybackAudioTest/brillo_PlaybackAudioTest.py
+++ b/server/site_tests/brillo_PlaybackAudioTest/brillo_PlaybackAudioTest.py
@@ -3,17 +3,31 @@
# found in the LICENSE file.
import logging
+import os
+import subprocess
+import tempfile
import time
import common
-from autotest_lib.client.common_lib.feedback import client
from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib.feedback import client
from autotest_lib.server import test
+_BITS_PER_BYTE = 8
# The amount of time to wait when producing silence (i.e. no playback).
_SILENCE_DURATION_SECS = 5
+# Number of channels to generate.
+_DEFAULT_NUM_CHANNELS = 1
+# Sine wave sample rate (48kHz).
+_DEFAULT_SAMPLE_RATE = 48000
+# Sine wave default sample format is signed 16-bit PCM (two bytes).
+_DEFAULT_SAMPLE_WIDTH = 2
+# Default sine wave frequency.
+_DEFAULT_SINE_FREQUENCY = 440
+# Default duration of the sine wave in seconds.
+_DEFAULT_DURATION_SECS = 10
class brillo_PlaybackAudioTest(test.test):
"""Verify that basic audio playback works."""
@@ -24,16 +38,19 @@
self.host = None
- def _get_playback_cmd(self, method):
+ def _get_playback_cmd(self, method, dut_play_file):
"""Get the playback command to execute based on the playback method.
@param method: A string specifying which method to use.
-
+ @param dut_play_file: A string containing the path to the file to play
+ on the DUT.
@return: A string containing the command to play audio using the
specified method.
@raises TestError: Invalid playback method.
"""
+ if dut_play_file:
+ return 'su root slesTest_playFdPath %s 0' % dut_play_file
if method == 'libmedia':
return 'brillo_audio_test --playback --libmedia --sine'
elif method == 'stagefright':
@@ -44,35 +61,92 @@
raise error.TestError('Test called with invalid playback method.')
- def test_playback(self, fb_query, playback_cmd):
+ def test_playback(self, fb_query, playback_cmd, sample_width, sample_rate,
+ duration_secs, num_channels, play_file_path=None):
"""Performs a playback test.
@param fb_query: A feedback query.
@param playback_cmd: The playback generating command, or None for no-op.
+ @param play_file_path: A string of the path to the file being played.
+ @param sample_width: Sample width to test playback at.
+ @param sample_rate: Sample rate to test playback at.
+ @param num_channels: Number of channels to test playback with.
"""
- fb_query.prepare()
+ fb_query.prepare(sample_width=sample_width,
+ sample_rate=sample_rate,
+ duration_secs=duration_secs,
+ num_channels=num_channels)
if playback_cmd:
self.host.run(playback_cmd)
else:
time.sleep(_SILENCE_DURATION_SECS)
- fb_query.validate()
+ if play_file_path:
+ fb_query.validate(audio_file=play_file_path)
+ else:
+ fb_query.validate()
- def run_once(self, host, fb_client, playback_method):
+ def run_once(self, host, fb_client, playback_method, use_file=False,
+ sample_width=_DEFAULT_SAMPLE_WIDTH,
+ sample_rate=_DEFAULT_SAMPLE_RATE,
+ num_channels=_DEFAULT_NUM_CHANNELS,
+ duration_secs=_DEFAULT_DURATION_SECS):
"""Runs the test.
@param host: A host object representing the DUT.
@param fb_client: A feedback client implementation.
@param playback_method: A string representing a playback method to use.
Either 'opensles', 'libmedia', or 'stagefright'.
+ @param use_file: Use a file to test audio. Must be used with
+ playback_method 'opensles'.
+ @param sample_width: Sample width to test playback at.
+ @param sample_rate: Sample rate to test playback at.
+ @param num_channels: Number of channels to test playback with.
+ @param duration_secs: Duration to play file for.
"""
self.host = host
with fb_client.initialize(self, host):
logging.info('Testing silent playback')
fb_query = fb_client.new_query(client.QUERY_AUDIO_PLAYBACK_SILENT)
- self.test_playback(fb_query, None)
+ self.test_playback(fb_query=fb_query,
+ playback_cmd=None,
+ sample_rate=sample_rate,
+ sample_width=sample_width,
+ num_channels=num_channels,
+ duration_secs=duration_secs)
+
+ dut_play_file = None
+ host_filename = None
+ if use_file:
+ _, host_filename = tempfile.mkstemp(
+ prefix='sine-', suffix='.wav',
+ dir=tempfile.mkdtemp(dir=fb_client.tmp_dir))
+ if sample_width == 1:
+ sine_format = '-e unsigned'
+ else:
+ sine_format = '-e signed'
+ gen_file_cmd = ('sox -n -t wav -c %d %s -b %d -r %d %s synth %d '
+ 'sine %d vol 0.9' % (num_channels, sine_format,
+ sample_width * _BITS_PER_BYTE,
+ sample_rate, host_filename,
+ duration_secs,
+ _DEFAULT_SINE_FREQUENCY))
+ logging.info('Command to generate sine wave: %s', gen_file_cmd)
+ subprocess.call(gen_file_cmd, shell=True)
+ logging.info('Send file to DUT.')
+ dut_tmp_dir = '/data'
+ dut_play_file = os.path.join(dut_tmp_dir, 'sine.wav')
+ logging.info('dut_play_file %s', dut_play_file)
+ host.send_file(host_filename, dut_play_file)
logging.info('Testing audible playback')
fb_query = fb_client.new_query(client.QUERY_AUDIO_PLAYBACK_AUDIBLE)
- self.test_playback(fb_query,
- self._get_playback_cmd(playback_method))
+ playback_cmd = self._get_playback_cmd(playback_method, dut_play_file)
+
+ self.test_playback(fb_query=fb_query,
+ playback_cmd=playback_cmd,
+ sample_rate=sample_rate,
+ sample_width=sample_width,
+ num_channels=num_channels,
+ duration_secs=duration_secs,
+ play_file_path=host_filename)
diff --git a/server/site_tests/brillo_PlaybackAudioTest/control.opensles.file b/server/site_tests/brillo_PlaybackAudioTest/control.opensles.file
new file mode 100644
index 0000000..6a80a2b
--- /dev/null
+++ b/server/site_tests/brillo_PlaybackAudioTest/control.opensles.file
@@ -0,0 +1,48 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from autotest_lib.client.common_lib import utils
+from autotest_lib.server.brillo.feedback import client_factory
+
+
+AUTHOR = 'garnold, ralphnathan'
+NAME = 'brillo_FileOpenSLESPlaybackAudioTest'
+TIME = 'SHORT'
+TEST_CATEGORY = 'Functional'
+TEST_TYPE = 'Server'
+ATTRIBUTES = 'suite:brillo-audio'
+SUITE = 'brillo-audio'
+
+DOC = """
+Tests audio playback using OpenSL ES and a file on a Brillo device.
+
+Test arguments:
+
+ feedback=NAME Name of the feedback client implementation to use. See
+ server.brillo.feedback.client_factory for supported values.
+ Default: 'loop'.
+
+ feedback_args Comma-separated list of initialization arguments for the
+ feedback client. Default: no additional arguments.
+"""
+
+TEST_ARG_NAMES = ()
+args_dict = utils.args_to_dict(args)
+
+
+def run(machine):
+ test_args = {name: args_dict[name] for name in TEST_ARG_NAMES
+ if name in args_dict}
+ fb_client_name = args_dict.get('feedback', 'loop')
+ fb_client = client_factory.get_audio_client(fb_client_name, NAME, machine,
+ args_dict.get('feedback_args'))
+ job.run_test('brillo_PlaybackAudioTest',
+ host=hosts.create_host(machine),
+ fb_client=fb_client,
+ playback_method='opensles',
+ use_file='true',
+ **test_args)
+
+
+parallel_simple(run, machines)
diff --git a/server/site_tests/brillo_RecordingAudioTest/control.libmedia.stereo b/server/site_tests/brillo_RecordingAudioTest/control.libmedia.stereo
index af9a247..af358d0 100644
--- a/server/site_tests/brillo_RecordingAudioTest/control.libmedia.stereo
+++ b/server/site_tests/brillo_RecordingAudioTest/control.libmedia.stereo
@@ -35,7 +35,8 @@
test_args = {name: args_dict[name] for name in TEST_ARG_NAMES
if name in args_dict}
fb_client_name = args_dict.get('feedback', 'loop')
- fb_client = client_factory.get_audio_client(fb_client_name)
+ fb_client = client_factory.get_audio_client(fb_client_name, NAME, machine,
+ args_dict.get('feedback_args'))
job.run_test('brillo_RecordingAudioTest',
host=hosts.create_host(machine),
fb_client=fb_client,
diff --git a/server/site_tests/firmware_PDPowerSwap/control b/server/site_tests/firmware_PDPowerSwap/control
new file mode 100644
index 0000000..cee71ad
--- /dev/null
+++ b/server/site_tests/firmware_PDPowerSwap/control
@@ -0,0 +1,33 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from autotest_lib.server import utils
+
+AUTHOR = "Chrome OS Team"
+NAME = "firmware_PDPowerSwap"
+PURPOSE = "Servo based PD power role swap test"
+CRITERIA = "This test will fail if a power swap gives unexpected results"
+TIME = "SHORT"
+TEST_CATEGORY = "Functional"
+TEST_CLASS = "firmware"
+TEST_TYPE = "server"
+
+DOC = """
+This test checks:
+ - If the DUT advertises support for dualrole operation
+ - Tests if the DUT can receive power swap requests
+ - Tests if the DUT can initiate power swap requests
+"""
+
+args_dict = utils.args_to_dict(args)
+servo_args = hosts.CrosHost.get_servo_arguments(args_dict)
+plankton_args = hosts.CrosHost.get_plankton_arguments(args_dict)
+
+def run(machine):
+ host = hosts.create_host(machine, servo_args=servo_args,
+ plankton_args=plankton_args)
+ job.run_test("firmware_PDPowerSwap", host=host, cmdline_args=args,
+ disable_sysinfo=True)
+
+parallel_simple(run, machines)
\ No newline at end of file
diff --git a/server/site_tests/firmware_PDPowerSwap/firmware_PDPowerSwap.py b/server/site_tests/firmware_PDPowerSwap/firmware_PDPowerSwap.py
new file mode 100644
index 0000000..fd283bb
--- /dev/null
+++ b/server/site_tests/firmware_PDPowerSwap/firmware_PDPowerSwap.py
@@ -0,0 +1,202 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+
+from autotest_lib.client.common_lib import error
+from autotest_lib.server.cros.faft.firmware_test import FirmwareTest
+from autotest_lib.server.cros.servo import pd_console
+
+
+class firmware_PDPowerSwap(FirmwareTest):
+ """
+ Servo based USB PD power role swap test.
+
+ Pass criteria is all power role swaps are successful if the DUT
+ is dualrole capable. If not dualrole, then pass criteria is
+ the DUT sending a reject message in response to swap request.
+
+ """
+ version = 1
+
+ PD_ROLE_DELAY = 0.5
+ PD_CONNECT_DELAY = 4
+ PLANKTON_PORT = 0
+ POWER_SWAP_ITERATIONS = 5
+ # Source power role
+ SRC ='SRC_READY'
+ # Sink power role
+ SNK = 'SNK_READY'
+
+ def _set_plankton_power_role_to_src(self):
+ """Force Plankton to act as a source
+
+ @returns True if Plankton power role is source, false otherwise
+ """
+ PLANKTON_SRC_VOLTAGE = 20
+ self.plankton.charge(PLANKTON_SRC_VOLTAGE)
+ # Wait for change to take place
+ time.sleep(self.PD_CONNECT_DELAY)
+ plankton_state = self.plankton_pd_utils.get_pd_state(self.PLANKTON_PORT)
+ # Current Plankton power role should be source
+ return bool(plankton_state == self.SRC)
+
+ def _send_power_swap_get_reply(self, console, port):
+ """Send power swap request, get PD control msg reply
+
+ The PD console debug mode is enabled prior to sending
+ a pd power role swap request message. This allows the
+ control message reply to be extracted. The debug mode
+ is disabled prior to exiting.
+
+ @param console: pd console object for uart access
+
+ @returns: PD control header message
+ """
+ # Enable PD console debug mode to show control messages
+ console.enable_pd_console_debug()
+ cmd = 'pd %d swap power' % port
+ m = console.send_pd_command_get_output(cmd, ['RECV\s([\w]+)'])
+ ctrl_msg = int(m[0][1], 16) & console.PD_CONTROL_MSG_MASK
+ console.disable_pd_console_debug()
+ return ctrl_msg
+
+ def _attempt_power_swap(self, pd_port, direction):
+ """Perform a power role swap request
+
+ Initiate a power role swap request on either the DUT or
+ Plankton depending on the direction parameter. The power
+ role swap is then verified to have taken place.
+
+ @param pd_port: DUT pd port value 0/1
+ @param direction: rx or tx from the DUT perspective
+
+ @returns True if power swap is successful
+ """
+ # Get DUT current power role
+ dut_pr = self.dut_pd_utils.get_pd_state(pd_port)
+ if direction == 'rx':
+ console = self.plankton_pd_utils
+ port = self.PLANKTON_PORT
+ else:
+ console = self.dut_pd_utils
+ port = pd_port
+ # Send power swap request
+ self._send_power_swap_get_reply(console, port)
+ time.sleep(self.PD_CONNECT_DELAY)
+ # Get Plankton power role
+ plankton_pr = self.plankton_pd_utils.get_pd_state(self.PLANKTON_PORT)
+ return bool(dut_pr == plankton_pr)
+
+ def _test_power_swap_reject(self, pd_port):
+ """Verify that a power swap request is rejected
+
+ This tests the case where the DUT isn't in dualrole mode.
+ A power swap request is sent by Plankton, and then
+ the control message checked to ensure the request was rejected.
+ In addition, the connection state is verified to not have
+ changed.
+
+ @param pd_port: port for DUT pd connection
+ """
+ # Get current DUT power role
+ dut_power_role = self.dut_pd_utils.get_pd_state(pd_port)
+ # Send swap command from Plankton and get reply
+ ctrl_msg = self._send_power_swap_get_reply(self.plankton_pd_utils,
+ self.PLANKTON_PORT)
+ if ctrl_msg != self.dut_pd_utils.PD_CONTROL_MSG_DICT['Reject']:
+ raise error.TestFail('Power Swap Req not rejected, returned %r' %
+ ctrl_msg)
+ # Get DUT current state
+ pd_state = self.dut_pd_utils.get_pd_state(pd_port)
+ if pd_state != dut_power_role:
+ raise error.TestFail('PD not connected! pd_state = %r' %
+ pd_state)
+
+ def initialize(self, host, cmdline_args):
+ super(firmware_PDPowerSwap, self).initialize(host, cmdline_args)
+ # Only run in normal mode
+ self.switcher.setup_mode('normal')
+ # Turn off console prints, except for USBPD.
+ self.usbpd.send_command('chan 0x08000000')
+
+ def cleanup(self):
+ self.usbpd.send_command('chan 0xffffffff')
+ super(firmware_PDPowerSwap, self).cleanup()
+
+ def run_once(self):
+ """Execute Power Role swap test.
+
+ 1. Verify that pd console is accessible
+ 2. Verify that DUT has a valid PD contract and connected to Plankton
+ 3. Determine if DUT is in dualrole mode
+ 4. If not dualrole mode, verify DUT rejects power swap request
+ Else test power swap (tx/rx), then force DUT to be sink or
+ source only and verify rejection of power swap request.
+
+ """
+ # create objects for pd utilities
+ self.dut_pd_utils = pd_console.PDConsoleUtils(self.usbpd)
+ self.plankton_pd_utils = pd_console.PDConsoleUtils(self.plankton)
+ self.connect_utils = pd_console.PDConnectionUtils(self.dut_pd_utils,
+ self.plankton_pd_utils)
+
+ # Make sure PD support exists in the UART console
+ if self.dut_pd_utils.verify_pd_console() == False:
+ raise error.TestFail("pd command not present on console!")
+
+ # Type C connection (PD contract) should exist at this point
+ # For this test, the DUT must be connected to a Plankton.
+ pd_port = self.connect_utils.find_dut_to_plankton_connection()
+ if pd_port is None:
+ raise error.TestFail("DUT to Plankton PD connection not found")
+ dut_connect_state = self.dut_pd_utils.get_pd_state(pd_port)
+ logging.info('Initial DUT connect state = %s', dut_connect_state)
+
+ # Get DUT dualrole status
+ if self.dut_pd_utils.is_pd_dual_role_enabled() == False:
+ # DUT does not support dualrole mode, power swap
+ # requests to the DUT should be rejected.
+ logging.info('Power Swap support not advertised by DUT')
+ self._test_power_swap_reject(pd_port)
+ logging.info('Power Swap request rejected by DUT as expected')
+ else:
+ # Start with Plankton as source
+ if self._set_plankton_power_role_to_src() == False:
+ raise error.TestFail('Plankton not set to source')
+ # DUT is in dual role mode. Test power role swap
+ # operation initiated both by the DUT and Plankton.
+ success = 0
+ for attempt in xrange(self.POWER_SWAP_ITERATIONS):
+ if attempt & 1:
+ direction = 'rx'
+ else:
+ direction = 'tx'
+ if self._attempt_power_swap(pd_port, direction):
+ success += 1
+ new_state = self.dut_pd_utils.get_pd_state(pd_port)
+ logging.info('New DUT power role = %s', new_state)
+
+ if success != self.POWER_SWAP_ITERATIONS:
+ raise error.TestFail('Failed %r power swap attempts' %
+ (self.POWER_SWAP_ITERATIONS - success))
+
+ # Force DUT to only support current power role
+ if new_state == self.SRC:
+ dual_mode = 'src'
+ else:
+ dual_mode = 'snk'
+ logging.info('Setting dualrole mode to %s', dual_mode)
+ self.dut_pd_utils.set_pd_dualrole(dual_mode)
+ time.sleep(self.PD_ROLE_DELAY)
+ # Expect behavior now is that DUT will reject power swap
+ self._test_power_swap_reject(pd_port)
+ logging.info('Power Swap request rejected by DUT as expected')
+ # Restore DUT dual role operation
+ self.dut_pd_utils.set_pd_dualrole('on')
+ # Set connection back to default arrangement
+ self.plankton_pd_utils.set_pd_dualrole('off')
+ self.plankton_pd_utils.send_pd_command('fake disconnect 100 1000')
+
diff --git a/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.open b/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.open
index be20ce3..ac5bc91 100644
--- a/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.open
+++ b/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.open
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -6,6 +6,9 @@
NAME = 'network_WiFi_ChaosConnectDisconnect.open'
TIME = 'LONG'
TEST_TYPE = 'server'
+ATTRIBUTES = 'suite:wifi_interop'
+SUITE = 'wifi_interop'
+DEPENDENCIES = 'chaos_nightly, chaos_dut'
DOC = """
This script iterates through all of the access points in the AP compatibility
diff --git a/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.open_n b/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.open_n
index f72d6ee..5ed6a49 100644
--- a/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.open_n
+++ b/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.open_n
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -6,6 +6,9 @@
NAME = 'network_WiFi_ChaosConnectDisconnect.open_n'
TIME = 'LONG'
TEST_TYPE = 'server'
+ATTRIBUTES = 'suite:wifi_interop'
+SUITE = 'wifi_interop'
+DEPENDENCIES = 'chaos_nightly, chaos_dut'
DOC = """
This script iterates through all of the access points in the AP compatibility
diff --git a/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.wpa2psk b/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.wpa2psk
index a2c45bf..b9c4c48 100644
--- a/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.wpa2psk
+++ b/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.wpa2psk
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -6,6 +6,9 @@
NAME = 'network_WiFi_ChaosConnectDisconnect.wpa2psk'
TIME = 'LONG'
TEST_TYPE = 'server'
+ATTRIBUTES = 'suite:wifi_interop'
+SUITE = 'wifi_interop'
+DEPENDENCIES = 'chaos_nightly, chaos_dut'
DOC = """
This script iterates through all of the access points in the AP compatibility
diff --git a/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.wpapsk b/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.wpapsk
index b9f7027..278c8b7 100644
--- a/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.wpapsk
+++ b/server/site_tests/network_WiFi_ChaosConnectDisconnect/control.wpapsk
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -6,6 +6,9 @@
NAME = 'network_WiFi_ChaosConnectDisconnect.wpapsk'
TIME = 'LONG'
TEST_TYPE = 'server'
+ATTRIBUTES = 'suite:wifi_interop'
+SUITE = 'wifi_interop'
+DEPENDENCIES = 'chaos_nightly, chaos_dut'
DOC = """
This script iterates through all of the access points in the AP compatibility
diff --git a/server/site_tests/provision_AndroidUpdate/provision_AndroidUpdate.py b/server/site_tests/provision_AndroidUpdate/provision_AndroidUpdate.py
index 47dbc9c..366799c 100644
--- a/server/site_tests/provision_AndroidUpdate/provision_AndroidUpdate.py
+++ b/server/site_tests/provision_AndroidUpdate/provision_AndroidUpdate.py
@@ -8,6 +8,7 @@
from autotest_lib.client.common_lib import global_config
from autotest_lib.server import afe_utils
from autotest_lib.server import test
+from autotest_lib.server.hosts import adb_host
_CONFIG = global_config.global_config
@@ -20,7 +21,8 @@
"""A test that can provision a machine to the correct Android version."""
version = 1
- def initialize(self, host, value, force=False, is_test_na=False):
+ def initialize(self, host, value, force=False, is_test_na=False,
+ repair=False):
"""Initialize.
@param host: The host object to update to |value|.
@@ -32,15 +34,16 @@
and passes the decision via this argument. Note
we can't raise TestNAError in control file as it won't
be caught and handled properly.
+ @param repair: not used by initialize.
"""
if is_test_na:
raise error.TestNAError('Provisioning not applicable.')
# We check value in initialize so that it fails faster.
- if not value:
+ if not (value or repair):
raise error.TestFail('No build version specified.')
- def run_once(self, host, value, force=False):
+ def run_once(self, host, value=None, force=False, repair=False):
"""The method called by the control file to start the test.
@param host: The host object to update to |value|.
@@ -50,10 +53,17 @@
the current image version. If False and the image
version matches our expected image version, no
provisioning will be done.
+ @param repair: If True, we are doing a repair provision, therefore the
+ build to provision is looked up from the AFE's
+ get_stable_version RPC.
"""
logging.debug('Start provisioning %s to %s', host, value)
+ if not value and not repair:
+ raise error.TestFail('No build provided and this is not a repair '
+ ' job.')
+
# If the host is already on the correct build, we have nothing to do.
if not force and afe_utils.get_build(host) == value:
# We can't raise a TestNA, as would make sense, as that makes
@@ -64,11 +74,27 @@
'Host already running %s' % value)
return
- url, _ = host.stage_build_for_install(value)
+ os_type = None
+ board = afe_utils.get_board(host)
+ if board:
+ logging.debug('Host %s is board type: %s', host, board)
+ if adb_host.OS_TYPE_BRILLO in board:
+ os_type = adb_host.OS_TYPE_BRILLO
+ else:
+ os_type = adb_host.OS_TYPE_ANDROID
+
+ if repair:
+ board=board.split('-')[-1]
+ value = afe_utils.get_stable_version(board=board, android=True)
+ if not value:
+ raise error.TestFail('No stable version assigned for board: '
+ '%s' % board)
+ url, _ = host.stage_build_for_install(value, os_type=os_type)
logging.debug('Installing image from: %s', url)
try:
- afe_utils.machine_install_and_update_labels(host, build_url=url)
+ afe_utils.machine_install_and_update_labels(
+ host, build_url=url, os_type=os_type)
except error.InstallError as e:
logging.error(e)
raise error.TestFail(str(e))
diff --git a/site_utils/stable_version_utils.py b/site_utils/stable_version_utils.py
index 6452be1..0477c34 100644
--- a/site_utils/stable_version_utils.py
+++ b/site_utils/stable_version_utils.py
@@ -35,21 +35,31 @@
return versions
-def get(board=DEFAULT):
+def get(board=DEFAULT, android=False):
"""Get stable version for the given board.
@param board: Name of the board, default to value `DEFAULT`.
+ @param android: If True, indicates we are looking up an Android/Brillo-based
+ board. There is no default version that works for all
+ Android/Brillo boards. If False, we are looking up a Chrome
+ OS based board.
+
@return: Stable version of the given board. If the given board is not listed
in afe_stable_versions table, DEFAULT will be used.
Return global_config value of CROS.stable_cros_version if
afe_stable_versions table does not have entry of board DEFAULT.
"""
+ if board == DEFAULT and android:
+ return None
try:
return models.StableVersion.objects.get(board=board).version
except django.core.exceptions.ObjectDoesNotExist:
if board == DEFAULT:
return global_config.global_config.get_config_value(
'CROS', 'stable_cros_version')
+ elif android:
+ return global_config.global_config.get_config_value(
+ 'ANDROID', 'stable_version_%s' % board, default=None)
else:
return get(board=DEFAULT)
diff --git a/suite_scheduler.ini b/suite_scheduler.ini
index 471285a..7cb867a 100644
--- a/suite_scheduler.ini
+++ b/suite_scheduler.ini
@@ -419,12 +419,40 @@
branch_specs: ==tot
pool: stress-wifi
-[WiFi_Interop_Nightly]
+[WiFi_Interop_WP2]
run_on: nightly
-hour: 10
+day: 4
suite: wifi_interop
branch_specs: ==tot
-pool:wifichaos
+pool:wifichaos_WP2
+
+[WiFi_Interop_StP2]
+run_on: nightly
+day: 5
+suite: wifi_interop
+branch_specs: ==tot
+pool:wifichaos_StP2
+
+[WiFi_Interop_8797]
+run_on: nightly
+day: 6
+suite: wifi_interop
+branch_specs: ==tot
+pool:wifichaos_8797
+
+[WiFi_Interop_8897]
+run_on: nightly
+day: 0
+suite: wifi_interop
+branch_specs: ==tot
+pool:wifichaos_8897
+
+[WiFi_Interop_4354]
+run_on: nightly
+day: 1
+suite: wifi_interop
+branch_specs: ==tot
+pool:wifichaos_4354
[WiFi_LucidSleep_Nightly]
run_on: nightly
diff --git a/test_suites/control.wifi_interop b/test_suites/control.wifi_interop
index 3111b1e..b56aa06 100644
--- a/test_suites/control.wifi_interop
+++ b/test_suites/control.wifi_interop
@@ -1,4 +1,4 @@
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -27,11 +27,12 @@
from autotest_lib.server.cros import provision
from autotest_lib.server.cros.dynamic_suite import dynamic_suite
+
dynamic_suite.reimage_and_run(
build=build, board=board, name=NAME, job=job, pool=pool,
check_hosts=check_hosts, add_experimental=True, num=num,
- file_bugs=file_bugs, priority=priority, timeout_mins=120,
- max_runtime_mins=120, devserver_url=devserver_url,
- version_prefix=provision.CROS_VERSION_PREFIX,
- wait_for_results=wait_for_results, job_retry=job_retry,
- max_retries=max_retries)
+ file_bugs=file_bugs, priority=priority, timeout_mins=1440,
+ max_runtime_mins=1440, devserver_url=devserver_url,
+ version_prefix=provision.CROS_VERSION_PREFIX, job_retry=job_retry,
+ max_retries=max_retries, suite_dependencies='chaos_nightly',
+ wait_for_results=wait_for_results)
diff --git a/tko/perf_upload/perf_uploader.py b/tko/perf_upload/perf_uploader.py
index 64fa3e6..4bce337 100644
--- a/tko/perf_upload/perf_uploader.py
+++ b/tko/perf_upload/perf_uploader.py
@@ -166,7 +166,7 @@
def _format_for_upload(platform_name, cros_version, chrome_version,
hardware_id, variant_name, hardware_hostname,
- perf_data, presentation_info, afe_job_id):
+ perf_data, presentation_info):
"""Formats perf data suitably to upload to the perf dashboard.
The perf dashboard expects perf data to be uploaded as a
@@ -188,8 +188,6 @@
_compute_avg_stddev().
@param presentation_info: A dictionary of dashboard presentation info for
the given test, as identified by _gather_presentation_info().
- @param afe_job_id: A string uniquely identifying the test run, this enables
- linking back from a test result to the logs of the test run.
@return A dictionary containing the formatted information ready to upload
to the performance dashboard.
@@ -228,8 +226,6 @@
'a_default_rev': 'r_chrome_version',
'a_hardware_identifier': hardware_id,
'a_hardware_hostname': hardware_hostname,
- 'a_variant_name': variant_name,
- 'a_afe_job_id': afe_job_id,
}
}
@@ -379,7 +375,6 @@
hardware_hostname = test.machine
variant_name = test.attributes.get(constants.VARIANT_KEY, None)
config_data = _parse_config_file(_PRESENTATION_CONFIG_FILE)
- afe_job_id = job.afe_job_id
try:
shadow_config_data = _parse_config_file(_PRESENTATION_SHADOW_CONFIG_FILE)
config_data.update(shadow_config_data)
@@ -391,8 +386,7 @@
presentation_info = _gather_presentation_info(config_data, test_name)
formatted_data = _format_for_upload(
platform_name, cros_version, chrome_version, hardware_id,
- variant_name, hardware_hostname, perf_data, presentation_info,
- afe_job_id)
+ variant_name, hardware_hostname, perf_data, presentation_info)
_send_to_dashboard(formatted_data)
except PerfUploadingError as e:
tko_utils.dprint('Error when uploading perf data to the perf '
diff --git a/tko/perf_upload/perf_uploader_unittest.py b/tko/perf_upload/perf_uploader_unittest.py
index 6655e3c..5979550 100644
--- a/tko/perf_upload/perf_uploader_unittest.py
+++ b/tko/perf_upload/perf_uploader_unittest.py
@@ -381,14 +381,6 @@
expected[idx]['supplemental_columns']['a_hardware_hostname'],
msg=fail_msg)
self.assertEqual(
- actual[idx]['supplemental_columns']['a_afe_job_id'],
- expected[idx]['supplemental_columns']['a_afe_job_id'],
- msg=fail_msg)
- self.assertEqual(
- actual[idx]['supplemental_columns']['a_variant_name'],
- expected[idx]['supplemental_columns']['a_variant_name'],
- msg=fail_msg)
- self.assertEqual(
actual[idx]['bot'], expected[idx]['bot'], msg=fail_msg)
self.assertEqual(
actual[idx]['revision'], expected[idx]['revision'], msg=fail_msg)
@@ -414,14 +406,12 @@
"""Verifies format_for_upload generates correct json data."""
result = perf_uploader._format_for_upload(
'platform', '25.1200.0.0', '25.10.1000.0', 'WINKY E2A-F2K-Q35',
- 'i7', 'test_machine', self._perf_data, self._PRESENT_INFO, '52926644-username')
+ 'i7', 'test_machine', self._perf_data, self._PRESENT_INFO)
expected_result_string = (
'[{"supplemental_columns": {"r_cros_version": "25.1200.0.0", '
'"a_default_rev" : "r_chrome_version",'
'"a_hardware_identifier" : "WINKY E2A-F2K-Q35",'
'"a_hardware_hostname" : "test_machine",'
- '"a_afe_job_id" : "52926644-username",'
- '"a_variant_name" : "i7",'
'"r_chrome_version": "25.10.1000.0"}, "bot": "cros-platform-i7", '
'"higher_is_better": false, "value": 2.7, '
'"revision": 10000000120000000, '
@@ -431,8 +421,6 @@
'"a_default_rev" : "r_chrome_version",'
'"a_hardware_identifier" : "WINKY E2A-F2K-Q35",'
'"a_hardware_hostname" : "test_machine",'
- '"a_afe_job_id" : "52926644-username",'
- '"a_variant_name" : "i7",'
'"r_chrome_version": "25.10.1000.0"}, "bot": "cros-platform-i7", '
'"higher_is_better": true, "value": 101.35, '
'"revision": 10000000120000000, '