Merge "BT with WiFi interference Test"
diff --git a/acts/README.md b/acts/README.md
index a4e9950..55c69b8 100644
--- a/acts/README.md
+++ b/acts/README.md
@@ -5,7 +5,7 @@
for accessing commercially avilable devices, Android devices, and a collection
of utility functions to further ease test development. It is an ideal desktop
tool for a wireless stack developer or integrator whether exercising a new code
-path, performing sanity testing, or running extended regression test suites.
+path, performing confidence testing, or running extended regression test suites.
Included in the tests/google directory are a bundle of tests, many of which can
be run with as little as one or two Android devices with wifi, cellular, or
@@ -76,7 +76,7 @@
2. Run "python3.4 setup.py install" with elevated permissions
3. To verify ACTS is ready to go, at the location for README, and run:
cd framework/tests/ \
- && act.py -c acts_sanity_test_config.json -tc IntegrationTest
+ && act.py -c acts_confidence_test_config.json -tc IntegrationTest
After installation, `act.py` will be in usr/bin and can be called as command
line utilities. Components in ACTS are importable under the package "acts."
@@ -90,11 +90,11 @@
## Breaking Down a Sample Command
-Above, the command `act.py -c acts_sanity_test_config.json -tc IntegrationTest`
+Above, the command `act.py -c acts_confidence_test_config.json -tc IntegrationTest`
was run to verify ACTS was properly set up.
Below are the components of that command:
- `act.py`: is the script that runs the test
-- -c acts_sanity_test_config: is the flag and name of the configuration file
+- -c acts_confidence_test_config: is the flag and name of the configuration file
to be used in the test
- -tc IntegrationTest: is the name of the test case
diff --git a/acts/framework/acts/controllers/adb.py b/acts/framework/acts/controllers/adb.py
index ff1a2ab..68ba955 100644
--- a/acts/framework/acts/controllers/adb.py
+++ b/acts/framework/acts/controllers/adb.py
@@ -19,6 +19,7 @@
import logging
import re
import shlex
+import shutil
from acts.controllers.adb_lib.error import AdbError
from acts.libs.proc import job
@@ -73,7 +74,7 @@
"""
self.serial = serial
self._server_local_port = None
- adb_path = job.run("which adb").stdout
+ adb_path = shutil.which('adb')
adb_cmd = [shlex.quote(adb_path)]
if serial:
adb_cmd.append("-s %s" % serial)
diff --git a/acts/framework/acts/controllers/fuchsia_lib/syslog_lib.py b/acts/framework/acts/controllers/fuchsia_lib/syslog_lib.py
index a8c102d..2b2f024 100644
--- a/acts/framework/acts/controllers/fuchsia_lib/syslog_lib.py
+++ b/acts/framework/acts/controllers/fuchsia_lib/syslog_lib.py
@@ -102,8 +102,9 @@
def start(self):
"""Starts reading the data from the syslog ssh connection."""
if self._started:
- raise FuchsiaSyslogError('Syslog has already started for '
- 'FuchsiaDevice (%s).' % self.ip_address)
+ logging.info('Syslog has already started for FuchsiaDevice (%s).' %
+ self.ip_address)
+ return None
self._started = True
self._listening_thread = Thread(target=self._exec_loop)
@@ -122,8 +123,9 @@
threads.
"""
if self._stopped:
- raise FuchsiaSyslogError('Syslog is already being stopped for '
- 'FuchsiaDevice (%s).' % self.ip_address)
+ logging.info('Syslog is already stopped for FuchsiaDevice (%s).' %
+ self.ip_address)
+ return None
self._stopped = True
try:
diff --git a/acts/framework/acts/libs/logging/log_stream.py b/acts/framework/acts/libs/logging/log_stream.py
index fbf0474..1003a7d 100644
--- a/acts/framework/acts/libs/logging/log_stream.py
+++ b/acts/framework/acts/libs/logging/log_stream.py
@@ -235,7 +235,8 @@
# Add a NullHandler to suppress unwanted console output
self.logger.addHandler(_null_handler)
self.logger.propagate = False
- self.base_path = base_path or logging.log_path
+ self.base_path = base_path or getattr(logging, 'log_path',
+ '/tmp/acts_logs')
self.subcontext = subcontext
context.TestContext.add_base_output_path(self.logger.name, self.base_path)
context.TestContext.add_subcontext(self.logger.name, self.subcontext)
diff --git a/acts/framework/acts/test_utils/bt/BtSarBaseTest.py b/acts/framework/acts/test_utils/bt/BtSarBaseTest.py
index 762af13..e70b060 100644
--- a/acts/framework/acts/test_utils/bt/BtSarBaseTest.py
+++ b/acts/framework/acts/test_utils/bt/BtSarBaseTest.py
@@ -454,7 +454,7 @@
}
if 'BTHotspot' in signal_dict.keys():
- device_state_dict[('BT Tethering',
+ device_state_dict[('Bluetooth tethering',
'bt_tethering')] = signal_dict['BTHotspot']
enforced_state = {}
diff --git a/acts/framework/acts/test_utils/bt/bt_test_utils.py b/acts/framework/acts/test_utils/bt/bt_test_utils.py
index 4084b8c..bfcf684 100644
--- a/acts/framework/acts/test_utils/bt/bt_test_utils.py
+++ b/acts/framework/acts/test_utils/bt/bt_test_utils.py
@@ -777,8 +777,9 @@
sum(metrics_dict["rssi"][ad.serial]) /
len(metrics_dict["rssi"][ad.serial]), 2)
# Returns last noted value for power level
- metrics_dict["pwlv"][ad.serial] = metrics_dict["pwlv"][
- ad.serial][-1]
+ metrics_dict["pwlv"][ad.serial] = float(
+ sum(metrics_dict["pwlv"][ad.serial]) /
+ len(metrics_dict["pwlv"][ad.serial]))
return metrics_dict
diff --git a/acts/framework/acts/test_utils/gnss/gnss_test_utils.py b/acts/framework/acts/test_utils/gnss/gnss_test_utils.py
index e5ba443..97ca0ed 100644
--- a/acts/framework/acts/test_utils/gnss/gnss_test_utils.py
+++ b/acts/framework/acts/test_utils/gnss/gnss_test_utils.py
@@ -896,6 +896,8 @@
begin_time)
if crash_result:
raise signals.TestError("GPSTool crashed. Abort test.")
+    # Wait 10 seconds to ensure the logs have been written into logcat
+ time.sleep(10)
return ttff_data
diff --git a/acts/framework/acts/test_utils/gnss/gnss_testlog_utils.py b/acts/framework/acts/test_utils/gnss/gnss_testlog_utils.py
index 0792da4..1611018 100644
--- a/acts/framework/acts/test_utils/gnss/gnss_testlog_utils.py
+++ b/acts/framework/acts/test_utils/gnss/gnss_testlog_utils.py
@@ -25,50 +25,84 @@
# GPS API Log Reading Config
CONFIG_GPSAPILOG = {
'phone_time':
- r'(?P<date>\d+\/\d+\/\d+)\s+(?P<time>\d+:\d+:\d+)\s+'
+ r'^(?P<date>\d+\/\d+\/\d+)\s+(?P<time>\d+:\d+:\d+)\s+'
r'Read:\s+(?P<logsize>\d+)\s+bytes',
'SpaceVehicle':
- r'Fix:\s+(?P<Fix>\w+)\s+Type:\s+(?P<Type>\w+)\s+'
+ r'^Fix:\s+(?P<Fix>\w+)\s+Type:\s+(?P<Type>\w+)\s+'
r'SV:\s+(?P<SV>\d+)\s+C\/No:\s+(?P<CNo>\d+\.\d+)\s+'
r'Elevation:\s+(?P<Elevation>\d+\.\d+)\s+'
r'Azimuth:\s+(?P<Azimuth>\d+\.\d+)\s+'
r'Signal:\s+(?P<Signal>\w+)\s+'
r'Frequency:\s+(?P<Frequency>\d+\.\d+)\s+'
r'EPH:\s+(?P<EPH>\w+)\s+ALM:\s+(?P<ALM>\w+)',
+ 'SpaceVehicle_wBB':
+ r'^Fix:\s+(?P<Fix>\w+)\s+Type:\s+(?P<Type>\w+)\s+'
+ r'SV:\s+(?P<SV>\d+)\s+C\/No:\s+(?P<AntCNo>\d+\.\d+),\s+'
+ r'(?P<BbCNo>\d+\.\d+)\s+'
+ r'Elevation:\s+(?P<Elevation>\d+\.\d+)\s+'
+ r'Azimuth:\s+(?P<Azimuth>\d+\.\d+)\s+'
+ r'Signal:\s+(?P<Signal>\w+)\s+'
+ r'Frequency:\s+(?P<Frequency>\d+\.\d+)\s+'
+ r'EPH:\s+(?P<EPH>\w+)\s+ALM:\s+(?P<ALM>\w+)',
'HistoryAvgTop4CNo':
- r'History\s+Avg\s+Top4\s+:\s+(?P<HistoryAvgTop4CNo>\d+\.\d+)',
+ r'^History\s+Avg\s+Top4\s+:\s+(?P<HistoryAvgTop4CNo>\d+\.\d+)',
'CurrentAvgTop4CNo':
- r'Current\s+Avg\s+Top4\s+:\s+(?P<CurrentAvgTop4CNo>\d+\.\d+)',
+ r'^Current\s+Avg\s+Top4\s+:\s+(?P<CurrentAvgTop4CNo>\d+\.\d+)',
'HistoryAvgCNo':
- r'History\s+Avg\s+:\s+(?P<HistoryAvgCNo>\d+\.\d+)',
+ r'^History\s+Avg\s+:\s+(?P<HistoryAvgCNo>\d+\.\d+)',
'CurrentAvgCNo':
- r'Current\s+Avg\s+:\s+(?P<CurrentAvgCNo>\d+\.\d+)',
+ r'^Current\s+Avg\s+:\s+(?P<CurrentAvgCNo>\d+\.\d+)',
+ 'AntennaHistoryAvgTop4CNo':
+ r'^Antenna_History\s+Avg\s+Top4\s+:\s+(?P<AntennaHistoryAvgTop4CNo>\d+\.\d+)',
+ 'AntennaCurrentAvgTop4CNo':
+ r'^Antenna_Current\s+Avg\s+Top4\s+:\s+(?P<AntennaCurrentAvgTop4CNo>\d+\.\d+)',
+ 'AntennaHistoryAvgCNo':
+ r'^Antenna_History\s+Avg\s+:\s+(?P<AntennaHistoryAvgCNo>\d+\.\d+)',
+ 'AntennaCurrentAvgCNo':
+ r'^Antenna_Current\s+Avg\s+:\s+(?P<AntennaCurrentAvgCNo>\d+\.\d+)',
+ 'BasebandHistoryAvgTop4CNo':
+ r'^Baseband_History\s+Avg\s+Top4\s+:\s+(?P<BasebandHistoryAvgTop4CNo>\d+\.\d+)',
+ 'BasebandCurrentAvgTop4CNo':
+ r'^Baseband_Current\s+Avg\s+Top4\s+:\s+(?P<BasebandCurrentAvgTop4CNo>\d+\.\d+)',
+ 'BasebandHistoryAvgCNo':
+ r'^Baseband_History\s+Avg\s+:\s+(?P<BasebandHistoryAvgCNo>\d+\.\d+)',
+ 'BasebandCurrentAvgCNo':
+ r'^Baseband_Current\s+Avg\s+:\s+(?P<BasebandCurrentAvgCNo>\d+\.\d+)',
'L5inFix':
- r'L5\s+used\s+in\s+fix:\s+(?P<L5inFix>\w+)',
+ r'^L5\s+used\s+in\s+fix:\s+(?P<L5inFix>\w+)',
'L5EngagingRate':
- r'L5\s+engaging\s+rate:\s+(?P<L5EngagingRate>\d+.\d+)%',
+ r'^L5\s+engaging\s+rate:\s+(?P<L5EngagingRate>\d+.\d+)%',
'Provider':
- r'Provider:\s+(?P<Provider>\w+)',
+ r'^Provider:\s+(?P<Provider>\w+)',
'Latitude':
- r'Latitude:\s+(?P<Latitude>-?\d+.\d+)',
+ r'^Latitude:\s+(?P<Latitude>-?\d+.\d+)',
'Longitude':
- r'Longitude:\s+(?P<Longitude>-?\d+.\d+)',
+ r'^Longitude:\s+(?P<Longitude>-?\d+.\d+)',
'Altitude':
- r'Altitude:\s+(?P<Altitude>-?\d+.\d+)',
+ r'^Altitude:\s+(?P<Altitude>-?\d+.\d+)',
'GNSSTime':
- r'Time:\s+(?P<Date>\d+\/\d+\/\d+)\s+'
+ r'^Time:\s+(?P<Date>\d+\/\d+\/\d+)\s+'
r'(?P<Time>\d+:\d+:\d+)',
'Speed':
- r'Speed:\s+(?P<Speed>\d+.\d+)',
+ r'^Speed:\s+(?P<Speed>\d+.\d+)',
'Bearing':
- r'Bearing:\s+(?P<Bearing>\d+.\d+)',
+ r'^Bearing:\s+(?P<Bearing>\d+.\d+)',
}
# Space Vehicle Statistics Dataframe List
+# Handle the pre GPSTool 2.12.24 case
LIST_SVSTAT = [
'HistoryAvgTop4CNo', 'CurrentAvgTop4CNo', 'HistoryAvgCNo', 'CurrentAvgCNo',
'L5inFix', 'L5EngagingRate'
]
+# Handle the post GPSTool 2.12.24 case with baseband CNo
+LIST_SVSTAT_WBB = [
+ 'AntennaHistoryAvgTop4CNo', 'AntennaCurrentAvgTop4CNo',
+ 'AntennaHistoryAvgCNo', 'AntennaCurrentAvgCNo',
+ 'BasebandHistoryAvgTop4CNo', 'BasebandCurrentAvgTop4CNo',
+ 'BasebandHistoryAvgCNo', 'BasebandCurrentAvgCNo', 'L5inFix',
+ 'L5EngagingRate'
+]
# Location Fix Info Dataframe List
LIST_LOCINFO = [
@@ -150,7 +184,7 @@
if index_rownum and not parsed_data[key].empty:
parsed_data[key].set_index('rownumber', inplace=True)
elif parsed_data[key].empty:
- LOGPARSE_UTIL_LOGGER.warning(
+ LOGPARSE_UTIL_LOGGER.debug(
'The parsed dataframe of "%s" is empty.', key)
# Return parsed data list
@@ -315,24 +349,43 @@
# Add phone_time from timestamp dataframe by row number
for key in parsed_data:
- if key != 'phone_time':
+ if (key != 'phone_time') and (not parsed_data[key].empty):
parsed_data[key] = pds.merge_asof(parsed_data[key],
parsed_data['phone_time'],
left_index=True,
right_index=True)
# Get space vehicle info dataframe
- sv_info_df = parsed_data['SpaceVehicle']
+ # Handle the pre GPSTool 2.12.24 case
+ if not parsed_data['SpaceVehicle'].empty:
+ sv_info_df = parsed_data['SpaceVehicle']
+
+ # Handle the post GPSTool 2.12.24 case with baseband CNo
+ elif not parsed_data['SpaceVehicle_wBB'].empty:
+ sv_info_df = parsed_data['SpaceVehicle_wBB']
# Get space vehicle statistics dataframe
- # First merge all dataframe from LIST_SVSTAT[1:],
- sv_stat_df = fts.reduce(
- lambda item1, item2: pds.merge(item1, item2, on='phone_time'),
- [parsed_data[key] for key in LIST_SVSTAT[1:]])
- # Then merge with LIST_SVSTAT[0]
- sv_stat_df = pds.merge(sv_stat_df,
- parsed_data[LIST_SVSTAT[0]],
- on='phone_time')
+ # Handle the pre GPSTool 2.12.24 case
+ if not parsed_data['HistoryAvgTop4CNo'].empty:
+ # First merge all dataframe from LIST_SVSTAT[1:],
+ sv_stat_df = fts.reduce(
+ lambda item1, item2: pds.merge(item1, item2, on='phone_time'),
+ [parsed_data[key] for key in LIST_SVSTAT[1:]])
+ # Then merge with LIST_SVSTAT[0]
+ sv_stat_df = pds.merge(sv_stat_df,
+ parsed_data[LIST_SVSTAT[0]],
+ on='phone_time')
+
+ # Handle the post GPSTool 2.12.24 case with baseband CNo
+ elif not parsed_data['AntennaHistoryAvgTop4CNo'].empty:
+        # First merge all dataframes from LIST_SVSTAT_WBB[1:],
+ sv_stat_df = fts.reduce(
+ lambda item1, item2: pds.merge(item1, item2, on='phone_time'),
+ [parsed_data[key] for key in LIST_SVSTAT_WBB[1:]])
+        # Then merge with LIST_SVSTAT_WBB[0]
+ sv_stat_df = pds.merge(sv_stat_df,
+ parsed_data[LIST_SVSTAT_WBB[0]],
+ on='phone_time')
# Get location fix information dataframe
# First merge all dataframe from LIST_LOCINFO[1:],
@@ -353,17 +406,53 @@
timestamp_df['logsize'] = timestamp_df['logsize'].astype(int)
sv_info_df['SV'] = sv_info_df['SV'].astype(int)
- sv_info_df['CNo'] = sv_info_df['CNo'].astype(float)
sv_info_df['Elevation'] = sv_info_df['Elevation'].astype(float)
sv_info_df['Azimuth'] = sv_info_df['Azimuth'].astype(float)
sv_info_df['Frequency'] = sv_info_df['Frequency'].astype(float)
- sv_stat_df['CurrentAvgTop4CNo'] = sv_stat_df['CurrentAvgTop4CNo'].astype(
- float)
- sv_stat_df['CurrentAvgCNo'] = sv_stat_df['CurrentAvgCNo'].astype(float)
- sv_stat_df['HistoryAvgTop4CNo'] = sv_stat_df['HistoryAvgTop4CNo'].astype(
- float)
- sv_stat_df['HistoryAvgCNo'] = sv_stat_df['HistoryAvgCNo'].astype(float)
+ if 'CNo' in list(sv_info_df.columns):
+ sv_info_df['CNo'] = sv_info_df['CNo'].astype(float)
+ sv_info_df['AntCNo'] = sv_info_df['CNo']
+ elif 'AntCNo' in list(sv_info_df.columns):
+ sv_info_df['AntCNo'] = sv_info_df['AntCNo'].astype(float)
+ sv_info_df['BbCNo'] = sv_info_df['BbCNo'].astype(float)
+
+ if 'CurrentAvgTop4CNo' in list(sv_stat_df.columns):
+ sv_stat_df['CurrentAvgTop4CNo'] = sv_stat_df[
+ 'CurrentAvgTop4CNo'].astype(float)
+ sv_stat_df['CurrentAvgCNo'] = sv_stat_df['CurrentAvgCNo'].astype(float)
+ sv_stat_df['HistoryAvgTop4CNo'] = sv_stat_df[
+ 'HistoryAvgTop4CNo'].astype(float)
+ sv_stat_df['HistoryAvgCNo'] = sv_stat_df['HistoryAvgCNo'].astype(float)
+ sv_stat_df['AntennaCurrentAvgTop4CNo'] = sv_stat_df[
+ 'CurrentAvgTop4CNo']
+ sv_stat_df['AntennaCurrentAvgCNo'] = sv_stat_df['CurrentAvgCNo']
+ sv_stat_df['AntennaHistoryAvgTop4CNo'] = sv_stat_df[
+ 'HistoryAvgTop4CNo']
+ sv_stat_df['AntennaHistoryAvgCNo'] = sv_stat_df['HistoryAvgCNo']
+ sv_stat_df['BasebandCurrentAvgTop4CNo'] = npy.nan
+ sv_stat_df['BasebandCurrentAvgCNo'] = npy.nan
+ sv_stat_df['BasebandHistoryAvgTop4CNo'] = npy.nan
+ sv_stat_df['BasebandHistoryAvgCNo'] = npy.nan
+
+ elif 'AntennaCurrentAvgTop4CNo' in list(sv_stat_df.columns):
+ sv_stat_df['AntennaCurrentAvgTop4CNo'] = sv_stat_df[
+ 'AntennaCurrentAvgTop4CNo'].astype(float)
+ sv_stat_df['AntennaCurrentAvgCNo'] = sv_stat_df[
+ 'AntennaCurrentAvgCNo'].astype(float)
+ sv_stat_df['AntennaHistoryAvgTop4CNo'] = sv_stat_df[
+ 'AntennaHistoryAvgTop4CNo'].astype(float)
+ sv_stat_df['AntennaHistoryAvgCNo'] = sv_stat_df[
+ 'AntennaHistoryAvgCNo'].astype(float)
+ sv_stat_df['BasebandCurrentAvgTop4CNo'] = sv_stat_df[
+ 'BasebandCurrentAvgTop4CNo'].astype(float)
+ sv_stat_df['BasebandCurrentAvgCNo'] = sv_stat_df[
+ 'BasebandCurrentAvgCNo'].astype(float)
+ sv_stat_df['BasebandHistoryAvgTop4CNo'] = sv_stat_df[
+ 'BasebandHistoryAvgTop4CNo'].astype(float)
+ sv_stat_df['BasebandHistoryAvgCNo'] = sv_stat_df[
+ 'BasebandHistoryAvgCNo'].astype(float)
+
sv_stat_df['L5EngagingRate'] = sv_stat_df['L5EngagingRate'].astype(float)
loc_info_df['Latitude'] = loc_info_df['Latitude'].astype(float)
diff --git a/acts/framework/acts/test_utils/power/PowerBTBaseTest.py b/acts/framework/acts/test_utils/power/PowerBTBaseTest.py
index 8979822..3ea3d80 100644
--- a/acts/framework/acts/test_utils/power/PowerBTBaseTest.py
+++ b/acts/framework/acts/test_utils/power/PowerBTBaseTest.py
@@ -27,7 +27,8 @@
INIT_ATTEN = [0]
-def ramp_attenuation(obj_atten, attenuation_target):
+def ramp_attenuation(obj_atten, attenuation_target, attenuation_step_max=20,
+ time_wait_in_between=5 ):
"""Ramp the attenuation up or down for BT tests.
Ramp the attenuation slowly so it won't have dramatic signal drop to affect
@@ -36,15 +37,16 @@
Args:
obj_atten: attenuator object, a single port attenuator
attenuation_target: target attenuation level to reach to.
+ attenuation_step_max: max step for attenuation set
+ time_wait_in_between: wait time between attenuation changes
"""
- attenuation_step_max = 20
sign = lambda x: copysign(1, x)
attenuation_delta = obj_atten.get_atten() - attenuation_target
while abs(attenuation_delta) > attenuation_step_max:
attenuation_intermediate = obj_atten.get_atten(
) - sign(attenuation_delta) * attenuation_step_max
obj_atten.set_atten(attenuation_intermediate)
- time.sleep(5)
+ time.sleep(time_wait_in_between)
attenuation_delta = obj_atten.get_atten() - attenuation_target
obj_atten.set_atten(attenuation_target)
diff --git a/acts/framework/acts/test_utils/tel/tel_test_utils.py b/acts/framework/acts/test_utils/tel/tel_test_utils.py
index fbb6917..7b4bc20 100644
--- a/acts/framework/acts/test_utils/tel/tel_test_utils.py
+++ b/acts/framework/acts/test_utils/tel/tel_test_utils.py
@@ -3747,6 +3747,7 @@
}
url_map = {
"1MB": [
+ "http://146.148.91.8/download/1MB.zip",
"http://ipv4.download.thinkbroadband.com/1MB.zip"
],
"5MB": [
diff --git a/acts/framework/tests/acts_confidence_test_config.json b/acts/framework/tests/acts_confidence_test_config.json
new file mode 100644
index 0000000..566beba
--- /dev/null
+++ b/acts/framework/tests/acts_confidence_test_config.json
@@ -0,0 +1,15 @@
+{
+ "testbed":
+ [
+ {
+ "_description": "ACTS confidence test bed, no device needed.",
+ "name": "Confidence",
+ "icecream": 42,
+ "MagicDevice": ["Magic!"]
+ }
+ ],
+ "logpath": "/tmp/logs",
+ "testpaths": ["./"],
+ "icecream": "mememe",
+ "extra_param": "haha"
+}
diff --git a/acts/tests/google/wifi/WifiPnoTest.py b/acts/tests/google/wifi/WifiPnoTest.py
index 4bfa1d7..ba2eb4e 100644
--- a/acts/tests/google/wifi/WifiPnoTest.py
+++ b/acts/tests/google/wifi/WifiPnoTest.py
@@ -115,17 +115,17 @@
finally:
pass
- def add_and_enable_dummy_networks(self, num_networks):
- """Add some dummy networks to the device and enable them.
+ def add_and_enable_test_networks(self, num_networks):
+ """Add some test networks to the device and enable them.
Args:
num_networks: Number of networks to add.
"""
- ssid_name_base = "pno_dummy_network_"
+ ssid_name_base = "pno_test_network_"
for i in range(0, num_networks):
network = {}
network[WifiEnums.SSID_KEY] = ssid_name_base + str(i)
- network[WifiEnums.PWD_KEY] = "pno_dummy"
+ network[WifiEnums.PWD_KEY] = "pno_test"
self.add_network_and_enable(network)
def add_network_and_enable(self, network):
@@ -178,13 +178,13 @@
16 is the max list size of PNO watch list for most devices. The device
should automatically pick the 16 latest added networks in the list.
- So add 16 dummy networks and then add 2 valid networks.
+ So add 16 test networks and then add 2 valid networks.
Steps:
- 1. Save 16 dummy network configurations in the device.
+ 1. Save 16 test network configurations in the device.
2. Run the simple pno test.
"""
- self.add_and_enable_dummy_networks(16)
+ self.add_and_enable_test_networks(16)
self.add_network_and_enable(self.pno_network_a)
self.add_network_and_enable(self.pno_network_b)
# Force single scan so that both networks become preferred before PNO.
diff --git a/acts/tests/google/wifi/WifiRttManagerTest.py b/acts/tests/google/wifi/WifiRttManagerTest.py
index 743d895..f0985de 100644
--- a/acts/tests/google/wifi/WifiRttManagerTest.py
+++ b/acts/tests/google/wifi/WifiRttManagerTest.py
@@ -405,7 +405,7 @@
"""Tests"""
def test_invalid_params(self):
- """Tests the sanity check function in RttManager.
+ """Tests the check function in RttManager.
"""
param_list = [{
RttParam.device_type: 3
diff --git a/acts/tests/google/wifi/WifiScannerMultiScanTest.py b/acts/tests/google/wifi/WifiScannerMultiScanTest.py
index f8804bb..aebcaf7 100755
--- a/acts/tests/google/wifi/WifiScannerMultiScanTest.py
+++ b/acts/tests/google/wifi/WifiScannerMultiScanTest.py
@@ -296,7 +296,7 @@
return idx, wait_time, scan_channels
def validate_scan_results(self, scan_results_dict):
- # Sanity check to make sure the dict is not empty
+ # Check to make sure the dict is not empty
asserts.assert_true(scan_results_dict, "Scan result dict is empty.")
for scan_result_obj in scan_results_dict.values():
# Validate the results received for each scan setting
diff --git a/acts_tests/tests/google/bt/sar/BtSarSanityTest.py b/acts_tests/tests/google/bt/sar/BtSarSanityTest.py
index 6e49bbf..f4b41d5 100644
--- a/acts_tests/tests/google/bt/sar/BtSarSanityTest.py
+++ b/acts_tests/tests/google/bt/sar/BtSarSanityTest.py
@@ -59,7 +59,7 @@
try:
propagated_value = int(
re.findall(key_regex, device_state)[0])
- except TypeError:
+ except IndexError:
propagated_value = 'NA'
if enforced_state[key] == propagated_value:
@@ -116,17 +116,16 @@
scenario_power_cap = self.get_current_power_cap(self.dut,
start_time,
type=type)
- sar_df.loc[scenario, '{}_power_cap'.
- format(type)] = scenario_power_cap
- self.log.info(
- 'scenario: {}, '
- 'sar_power: {}, power_cap:{}'.format(
- scenario, sar_df.loc[scenario, column_name],
- sar_df.loc[scenario, '{}_power_cap'.format(type)]))
+ sar_df.loc[scenario,
+ '{}_power_cap'.format(type)] = scenario_power_cap
+ self.log.info('scenario: {}, '
+ 'sar_power: {}, power_cap:{}'.format(
+ scenario, sar_df.loc[scenario, column_name],
+ sar_df.loc[scenario,
+ '{}_power_cap'.format(type)]))
- if not sar_df['{}_power_cap'.format(type)].equals(
- sar_df[column_name]):
- power_cap_error = True
+ if not sar_df['{}_power_cap'.format(type)].equals(sar_df[column_name]):
+ power_cap_error = True
results_file_path = os.path.join(
self.log_path, '{}.csv'.format(self.current_test_name))
@@ -134,7 +133,7 @@
# Comparing read device power cap to expected device power cap
if power_cap_error:
- asserts.fail("Power Caps didn't match powers in the {} table")
+ asserts.fail("Power Caps didn't match powers in the SAR table")
else:
asserts.explicit_pass('Power Caps were set according to the table')
diff --git a/acts_tests/tests/google/gnss/GnssSanityTest.py b/acts_tests/tests/google/gnss/GnssFunctionTest.py
similarity index 98%
rename from acts_tests/tests/google/gnss/GnssSanityTest.py
rename to acts_tests/tests/google/gnss/GnssFunctionTest.py
index a13bb54..91293a3 100644
--- a/acts_tests/tests/google/gnss/GnssSanityTest.py
+++ b/acts_tests/tests/google/gnss/GnssFunctionTest.py
@@ -78,8 +78,8 @@
from acts.test_utils.tel.tel_test_utils import get_tcpdump_log
-class GnssSanityTest(BaseTestClass):
- """ GNSS Function Sanity Tests"""
+class GnssFunctionTest(BaseTestClass):
+ """ GNSS Function Tests"""
def setup_class(self):
super().setup_class()
self.ad = self.android_devices[0]
@@ -97,7 +97,7 @@
"default_gnss_signal_attenuation",
"weak_gnss_signal_attenuation",
"no_gnss_signal_attenuation", "gnss_init_error_list",
- "gnss_init_error_whitelist", "pixel_lab_location",
+ "gnss_init_error_allowlist", "pixel_lab_location",
"legacy_wifi_xtra_cs_criteria", "legacy_projects",
"qdsp6m_path", "supl_capabilities", "ttff_test_cycle",
"collect_logs"]
@@ -203,9 +203,9 @@
self.ad.log.info("There is no mcfg.version before push, "
"unmatching device")
return False
- except:
+ except Exception as e:
self.ad.log.info("There is no mcfg.version before push, "
- "unmatching device")
+ "unmatching device %s" % e)
return False
get_baseband_and_gms_version(self.ad, "Before push mcfg")
try:
@@ -371,11 +371,11 @@
for attr in self.gnss_init_error_list:
error = self.ad.adb.shell("logcat -d | grep -E '%s'" % attr)
if error:
- for whitelist in self.gnss_init_error_whitelist:
- if whitelist in error:
- error = re.sub(".*"+whitelist+".*\n?", "", error)
- self.ad.log.info("\"%s\" is white-listed and removed "
- "from error." % whitelist)
+ for allowlist in self.gnss_init_error_allowlist:
+ if allowlist in error:
+ error = re.sub(".*"+allowlist+".*\n?", "", error)
+ self.ad.log.info("\"%s\" is in allow-list and removed "
+ "from error." % allowlist)
if error:
error_mismatch = False
self.ad.log.error("\n%s" % error)
diff --git a/acts_tests/tests/google/power/bt/PowerBLEadvertiseTest.py b/acts_tests/tests/google/power/bt/PowerBLEadvertiseTest.py
index f5e24bd..1933699 100644
--- a/acts_tests/tests/google/power/bt/PowerBLEadvertiseTest.py
+++ b/acts_tests/tests/google/power/bt/PowerBLEadvertiseTest.py
@@ -20,34 +20,31 @@
import acts.test_utils.power.PowerBTBaseTest as PBtBT
BLE_LOCATION_SCAN_ENABLE = 'settings put secure location_mode 3'
-EXTRA_ADV_TIME = 10
-MONSOON_TAIL_CUT = 5
+EXTRA_ADV_TIME = 3
+ADV_TAIL = 5
class PowerBLEadvertiseTest(PBtBT.PowerBTBaseTest):
def __init__(self, configs):
super().__init__(configs)
- req_params = ['adv_modes', 'adv_power_levels', 'adv_duration']
+ req_params = ['adv_modes', 'adv_power_levels']
self.unpack_userparams(req_params)
# Loop all advertise modes and power levels
for adv_mode in self.adv_modes:
for adv_power_level in self.adv_power_levels:
- self.generate_test_case(adv_mode, adv_power_level,
- self.adv_duration)
+ self.generate_test_case(adv_mode, adv_power_level)
def setup_class(self):
super().setup_class()
self.dut.adb.shell(BLE_LOCATION_SCAN_ENABLE)
# Make sure during power measurement, advertisement is always on
- self.mon_info.duration = (self.adv_duration - self.mon_offset -
- EXTRA_ADV_TIME - MONSOON_TAIL_CUT)
+ self.adv_duration = self.mon_info.duration + self.mon_offset + ADV_TAIL + EXTRA_ADV_TIME
- def generate_test_case(self, adv_mode, adv_power_level, adv_duration):
+ def generate_test_case(self, adv_mode, adv_power_level):
def test_case_fn():
- self.measure_ble_advertise_power(adv_mode, adv_power_level,
- adv_duration)
+ self.measure_ble_advertise_power(adv_mode, adv_power_level)
adv_mode_str = bleenum.AdvertiseSettingsAdvertiseMode(adv_mode).name
adv_txpl_str = bleenum.AdvertiseSettingsAdvertiseTxPower(
@@ -55,10 +52,9 @@
test_case_name = ('test_BLE_{}_{}'.format(adv_mode_str, adv_txpl_str))
setattr(self, test_case_name, test_case_fn)
- def measure_ble_advertise_power(self, adv_mode, adv_power_level,
- adv_duration):
+ def measure_ble_advertise_power(self, adv_mode, adv_power_level):
btputils.start_apk_ble_adv(self.dut, adv_mode, adv_power_level,
- adv_duration)
+ self.adv_duration)
time.sleep(EXTRA_ADV_TIME)
self.measure_power_and_validate()
diff --git a/acts_tests/tests/google/power/bt/PowerBLEscanTest.py b/acts_tests/tests/google/power/bt/PowerBLEscanTest.py
index 91b11c3..f859f0c 100644
--- a/acts_tests/tests/google/power/bt/PowerBLEscanTest.py
+++ b/acts_tests/tests/google/power/bt/PowerBLEscanTest.py
@@ -20,39 +20,37 @@
import acts.test_utils.power.PowerBTBaseTest as PBtBT
BLE_LOCATION_SCAN_ENABLE = 'settings put secure location_mode 3'
-EXTRA_SCAN_TIME = 10
-MONSOON_TAIL_CUT = 5
+EXTRA_SCAN_TIME = 3
+SCAN_TAIL = 5
class PowerBLEscanTest(PBtBT.PowerBTBaseTest):
def __init__(self, configs):
super().__init__(configs)
- req_params = ['scan_modes', 'scan_duration']
+ req_params = ['scan_modes']
self.unpack_userparams(req_params)
for scan_mode in self.scan_modes:
- self.generate_test_case_no_devices_around(scan_mode,
- self.scan_duration)
+ self.generate_test_case_no_devices_around(scan_mode)
def setup_class(self):
super().setup_class()
self.dut.adb.shell(BLE_LOCATION_SCAN_ENABLE)
# Make sure during power measurement, scan is always on
- self.mon_info.duration = (self.scan_duration - self.mon_offset -
- EXTRA_SCAN_TIME - MONSOON_TAIL_CUT)
+ self.scan_duration = self.mon_info.duration + self.mon_offset + SCAN_TAIL + EXTRA_SCAN_TIME
- def generate_test_case_no_devices_around(self, scan_mode, scan_duration):
+ def generate_test_case_no_devices_around(self, scan_mode):
def test_case_fn():
- self.measure_ble_scan_power(scan_mode, scan_duration)
+ self.measure_ble_scan_power(scan_mode)
test_case_name = ('test_BLE_{}_no_advertisers'.format(
bleenum.ScanSettingsScanMode(scan_mode).name))
setattr(self, test_case_name, test_case_fn)
- def measure_ble_scan_power(self, scan_mode, scan_duration):
+ def measure_ble_scan_power(self, scan_mode):
- btputils.start_apk_ble_scan(self.dut, scan_mode, scan_duration)
+ btputils.start_apk_ble_scan(self.dut, scan_mode, self.scan_duration)
time.sleep(EXTRA_SCAN_TIME)
self.measure_power_and_validate()
diff --git a/acts_tests/tests/google/power/bt/PowerBTa2dpTest.py b/acts_tests/tests/google/power/bt/PowerBTa2dpTest.py
index 2473b9d..b6d60f1 100644
--- a/acts_tests/tests/google/power/bt/PowerBTa2dpTest.py
+++ b/acts_tests/tests/google/power/bt/PowerBTa2dpTest.py
@@ -60,18 +60,20 @@
else:
self.log.info('Current Codec is {}, no need to change'.format(
current_codec_type))
+ # Start music playing
+ self.media.play()
+ time.sleep(EXTRA_PLAY_TIME)
# Set attenuation so BT tx at desired power level
self.log.info('Current Attenuation {} dB'.format(
self.attenuator.get_atten()))
tpl = 'PL' + str(tpl)
- PBtBT.ramp_attenuation(self.attenuator, self.atten_pl_settings[tpl][0])
+ PBtBT.ramp_attenuation(self.attenuator, self.atten_pl_settings[tpl][0],
+ attenuation_step_max=1, time_wait_in_between=1)
self.log.info('Setting Attenuator to {} dB'.format(
self.atten_pl_settings[tpl][0]))
- self.media.play()
self.log.info('Running A2DP with codec {} at {}'.format(
codec_config['codec_type'], tpl))
self.dut.droid.goToSleepNow()
- time.sleep(EXTRA_PLAY_TIME)
self.measure_power_and_validate()
diff --git a/acts_tests/tests/google/power/bt/PowerBTcalibrationTest.py b/acts_tests/tests/google/power/bt/PowerBTcalibrationTest.py
index d6f0292..4b14f17 100644
--- a/acts_tests/tests/google/power/bt/PowerBTcalibrationTest.py
+++ b/acts_tests/tests/google/power/bt/PowerBTcalibrationTest.py
@@ -52,14 +52,17 @@
# Loop through attenuation in 1 dB step until reaching at PL10
self.log.info('Starting Calibration Process')
+ pl10_count = 0
for i in range(int(self.attenuator.get_max_atten())):
self.attenuator.set_atten(i)
bt_metrics_dict = btutils.get_bt_metric(self.dut)
- pwl = int(bt_metrics_dict['pwlv'][self.dut.serial])
+ pwl = bt_metrics_dict['pwlv'][self.dut.serial]
self.log.info('Reach PW {} at attenuation {} dB'.format(pwl, i))
self.cal_matrix.append([i, pwl])
if pwl == 10:
+ pl10_count += 1
+ if pl10_count > 5:
break
# Write cal results to csv
diff --git a/acts_tests/tests/google/tel/live/TelLiveStressTest.py b/acts_tests/tests/google/tel/live/TelLiveStressTest.py
index 9677ed6..7a2e766 100644
--- a/acts_tests/tests/google/tel/live/TelLiveStressTest.py
+++ b/acts_tests/tests/google/tel/live/TelLiveStressTest.py
@@ -103,6 +103,7 @@
EXCEPTION_TOLERANCE = 5
BINDER_LOGS = ["/sys/kernel/debug/binder"]
+DEFAULT_FILE_DOWNLOADS = ["1MB", "5MB", "10MB", "20MB", "50MB"]
class TelLiveStressTest(TelephonyBaseTest):
@@ -158,6 +159,7 @@
self.dut_capabilities = telephony_info.get("capabilities", [])
self.dut_wfc_modes = telephony_info.get("wfc_modes", [])
self.gps_log_file = self.user_params.get("gps_log_file", None)
+ self.file_name_list = self.user_params.get("file_downloads", DEFAULT_FILE_DOWNLOADS)
return True
def setup_test(self):
@@ -770,7 +772,7 @@
else:
return True
- def _data_download(self, file_names=["5MB", "10MB", "20MB", "50MB"]):
+ def _data_download(self, file_names=[]):
begin_time = get_current_epoch_time()
slot_id = random.randint(0,1)
if self.dsds_esim:
@@ -831,11 +833,7 @@
def data_test(self):
while time.time() < self.finishing_time:
try:
- operator_name = self.dut.adb.getprop("gsm.sim.operator.alpha")
- if CARRIER_SING in operator_name:
- self._data_download(file_names=["1MB", "5MB"])
- else:
- self._data_download()
+ self._data_download(self.file_name_list)
except Exception as e:
self.log.error("Exception error %s", str(e))
self.result_info["Exception Errors"] += 1