68.14.5 - testing

This commit is contained in:
Fedor 2024-11-25 17:14:31 +02:00
parent e0b5494ed1
commit 8c279e84a3
2893 changed files with 22892 additions and 11614 deletions

View File

@ -1,11 +1,12 @@
[ShowSSEConfig]
[TestPrintf]
[TestAppShellSteadyState]
[TestAlgorithm]
[TestArray]
[TestArrayUtils]
[TestAtomicBitflags]
[TestAtomics]
[TestBinarySearch]
[TestBitSet]
[TestBloomFilter]
[TestBufferList]
[TestCasting]
@ -39,8 +40,6 @@ skip-if = os != 'win'
[TestBaseProfiler]
[TestNonDereferenceable]
[TestNotNull]
[TestParseFTPList]
[TestPLDHash]
[TestPoisonArea]
skip-if = os == 'android' # Bug 1147630
[TestRange]
@ -55,7 +54,7 @@ skip-if = os == 'android' # Bug 1147630
[TestSaturate]
[TestSplayTree]
[TestSPSCQueue]
[TestSyncRunnable]
[TestTainting]
[TestTemplateLib]
[TestTextUtils]
[TestThreadSafeWeakPtr]
@ -66,22 +65,9 @@ skip-if = os == 'android' # Bug 1147630
[TestUtf8]
[TestVariant]
[TestVector]
[TestVolatileBuffer]
[TestWeakPtr]
[TestWrappingOperations]
[TestXorShift128PlusRNG]
[buffered_stun_socket_unittest]
[ice_unittest]
[test_nr_socket_unittest]
[jsapi-tests]
[multi_tcp_socket_unittest]
[nrappkit_unittest]
[rlogringbuffer_unittest]
[runnable_utils_unittest]
[sctp_unittest]
[signaling_unittests]
[signaling_unittests_standalone]
[simpletokenbucket_unittest]
[sockettransportservice_unittest]
[transport_unittests]
[turn_unittest]

View File

@ -25,7 +25,7 @@ def smooth_scroll(marionette_session, start_element, axis, direction,
wait_period = wait_period or 0.05
scroll_back = scroll_back or False
current = 0
if axis is "x":
if axis == "x":
if direction is -1:
offset = [-increments, 0]
else:

View File

@ -184,7 +184,7 @@ cookie.add = function(newCookie, { restrictToHost = null } = {}) {
newCookie.session,
newCookie.expiry,
{} /* origin attributes */,
Ci.nsICookie.SAMESITE_UNSET
Ci.nsICookie.SAMESITE_NONE
);
} catch (e) {
throw new UnableToSetCookieError(e);

View File

@ -2,13 +2,15 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
from __future__ import absolute_import, print_function
import glob
import os
import platform
import shutil
import sys
import unittest
from io import StringIO
from marionette_driver import Wait
from marionette_driver.errors import (
@ -99,6 +101,25 @@ class BaseCrashTestCase(MarionetteTestCase):
class TestCrash(BaseCrashTestCase):
def setUp(self):
if os.environ.get('MOZ_AUTOMATION'):
# Capture stdout, otherwise the Gecko output causes mozharness to fail
# the task due to "A content process has crashed" appearing in the log.
# To view stdout for debugging, use `print(self.new_out.getvalue())`
print("Suppressing GECKO output. To view, add `print(self.new_out.getvalue())` "
"to the end of this test.")
self.new_out, self.new_err = StringIO(), StringIO()
self.old_out, self.old_err = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self.new_out, self.new_err
super(TestCrash, self).setUp()
def tearDown(self):
super(TestCrash, self).tearDown()
if os.environ.get('MOZ_AUTOMATION'):
sys.stdout, sys.stderr = self.old_out, self.old_err
@unittest.skipIf(platform.machine() == "ARM64" and platform.system() == "Windows",
"Bug 1540784 - crashreporter related issues on Windows 10 aarch64. ")
def test_crash_chrome_process(self):

View File

@ -452,7 +452,7 @@ class MachCommands(MachCommandBase):
from mozrunner.devices.android_device import verify_android_device
app = kwargs.get('app')
if not app:
app = self.substs["ANDROID_PACKAGE_NAME"]
app = "org.mozilla.geckoview.test"
device_serial = kwargs.get('deviceSerial')
# verify installation
@ -566,7 +566,7 @@ class RobocopCommands(MachCommandBase):
# verify installation
app = kwargs.get('app')
if not app:
app = self.substs["ANDROID_PACKAGE_NAME"]
kwargs['app'] = app = self.substs["ANDROID_PACKAGE_NAME"]
device_serial = kwargs.get('deviceSerial')
verify_android_device(self, install=True, xre=False, network=True,
app=app, device_serial=device_serial)

View File

@ -60,7 +60,7 @@ def run_mochitest_desktop(context, args):
def run_mochitest_android(context, args):
args.app = args.app or 'org.mozilla.fennec'
args.app = args.app or 'org.mozilla.geckoview.test'
args.extraProfileFiles.append(os.path.join(context.package_root, 'mochitest', 'fonts'))
args.utilityPath = context.hostutils
args.xrePath = context.hostutils

View File

@ -946,10 +946,7 @@ class AndroidArguments(ArgumentContainer):
options.webServer = options.remoteWebServer
if options.app is None:
if build_obj:
options.app = build_obj.substs['ANDROID_PACKAGE_NAME']
else:
parser.error("You must specify either appPath or app")
options.app = "org.mozilla.geckoview.test"
if build_obj and 'MOZ_HOST_BIN' in os.environ:
options.xrePath = os.environ['MOZ_HOST_BIN']

View File

@ -89,8 +89,6 @@ BROWSER_CHROME_MANIFESTS += [
TEST_HARNESS_FILES.testing.mochitest += [
'/build/mobile/remoteautomation.py',
'/build/pgo/server-locations.txt',
'/build/sanitizers/lsan_suppressions.txt',
'/build/sanitizers/ubsan_suppressions.txt',
'/build/valgrind/cross-architecture.sup',
'/build/valgrind/i386-pc-linux-gnu.sup',
'/build/valgrind/x86_64-pc-linux-gnu.sup',

View File

@ -1616,21 +1616,15 @@ toolbar#nav-bar {
def buildBrowserEnv(self, options, debugger=False, env=None):
"""build the environment variables for the specific test and operating system"""
if mozinfo.info["asan"] and mozinfo.isLinux and mozinfo.bits == 64:
lsanPath = SCRIPT_DIR
useLSan = True
else:
lsanPath = None
if mozinfo.info["ubsan"]:
ubsanPath = SCRIPT_DIR
else:
ubsanPath = None
useLSan = False
browserEnv = self.environment(
xrePath=options.xrePath,
env=env,
debugger=debugger,
lsanPath=lsanPath,
ubsanPath=ubsanPath)
useLSan=useLSan)
if hasattr(options, "topsrcdir"):
browserEnv["MOZ_DEVELOPER_REPO_DIR"] = options.topsrcdir
@ -2575,6 +2569,7 @@ toolbar#nav-bar {
'dom.serviceWorkers.parent_intercept', False),
"socketprocess_e10s": self.extraPrefs.get(
'network.process.enabled', False),
"webrender": options.enable_webrender,
})
self.setTestRoot(options)
@ -3113,13 +3108,6 @@ def run_test_harness(parser, options):
if hasattr(options, 'log'):
delattr(options, 'log')
# windows10-aarch64 does not yet support crashreporter testing.
# see https://bugzilla.mozilla.org/show_bug.cgi?id=1536221
if mozinfo.os == "win" and mozinfo.processor == "aarch64":
# manually override the mozinfo.crashreporter value after MochitestDesktop
# is instantiated.
mozinfo.update({u"crashreporter": False})
options.runByManifest = False
if options.flavor in ('plain', 'browser', 'chrome'):
options.runByManifest = True

View File

@ -2252,8 +2252,6 @@ class ADBDevice(ADBCommand):
local = '/'.join(local.rstrip('/').split('/')[:-1])
try:
self.command_output(["pull", remote, local], timeout=timeout)
except BaseException:
raise
finally:
if copy_required:
dir_util.copy_tree(local, original_local)

View File

@ -66,7 +66,7 @@ info = {'os': unknown,
'os_version': unknown,
'bits': unknown,
'has_sandbox': unknown,
'webrender': bool(os.environ.get("MOZ_WEBRENDER", False)),
'webrender': False,
'automation': bool(os.environ.get("MOZ_AUTOMATION", False)),
}
(system, node, release, version, machine, processor) = platform.uname()

View File

@ -224,7 +224,7 @@ def uninstall(install_folder):
try:
cmdArgs = ['%s\\uninstall\helper.exe' % install_folder, '/S']
result = subprocess.call(cmdArgs)
if result is not 0:
if result != 0:
raise Exception('Execution of uninstaller failed.')
# The uninstaller spawns another process so the subprocess call

View File

@ -26,6 +26,7 @@ class DeviceRunner(BaseRunner):
'MOZ_CRASHREPORTER_NO_REPORT': '1',
'MOZ_CRASHREPORTER_SHUTDOWN': '1',
'MOZ_HIDE_RESULTS_TABLE': '1',
'MOZ_IN_AUTOMATION': '1',
'MOZ_LOG': 'signaling:3,mtransport:4,DataChannel:4,jsep:4',
'R_LOG_LEVEL': '6',
'R_LOG_DESTINATION': 'stderr',

View File

@ -20,8 +20,6 @@ from distutils.spawn import find_executable
import psutil
import six.moves.urllib as urllib
from mozdevice import ADBHost, ADBDevice
from mozprocess import ProcessHandler
from six.moves.urllib.parse import urlparse
EMULATOR_HOME_DIR = os.path.join(os.path.expanduser('~'), '.mozbuild', 'android-device')
@ -227,7 +225,7 @@ def verify_android_device(build_obj, install=False, xre=False, debugger=False,
# - it prevents testing against other builds (downloaded apk)
# - installation may take a couple of minutes.
if not app:
app = build_obj.substs["ANDROID_PACKAGE_NAME"]
app = "org.mozilla.geckoview.test"
device = _get_device(build_obj.substs, device_serial)
response = ''
action = 'Re-install'
@ -332,43 +330,10 @@ def get_adb_path(build_obj):
return _find_sdk_exe(build_obj.substs, 'adb', False)
def run_firefox_for_android(build_obj, params, **kwargs):
"""
Launch Firefox for Android on the connected device.
Optional 'params' allow parameters to be passed to Firefox.
"""
device = _get_device(build_obj.substs)
try:
#
# Construct an adb command similar to:
#
# $ adb shell am start -a android.activity.MAIN \
# -n org.mozilla.fennec_$USER \
# -d <url param> \
# --es args "<params>"
#
app = build_obj.substs['ANDROID_PACKAGE_NAME']
msg = "URL specified as '{}'; dropping URL-like parameter '{}'"
if params:
for p in params:
if urlparse.urlparse(p).scheme != "":
params.remove(p)
if kwargs.get('url'):
_log_warning(msg.format(kwargs['url'], p))
else:
kwargs['url'] = p
device.launch_fennec(app, extra_args=params, **kwargs)
except Exception:
_log_warning("unable to launch Firefox for Android")
return 1
return 0
def grant_runtime_permissions(build_obj, app, device_serial=None):
"""
Grant required runtime permissions to the specified app
(typically org.mozilla.fennec_$USER).
(eg. org.mozilla.geckoview.test).
"""
device = _get_device(build_obj.substs, device_serial)
try:
@ -424,11 +389,14 @@ class AndroidEmulator(object):
Returns True if the Android emulator is running.
"""
for proc in psutil.process_iter():
name = proc.name()
# On some platforms, "emulator" may start an emulator with
# process name "emulator64-arm" or similar.
if name and name.startswith('emulator'):
return True
try:
name = proc.name()
# On some platforms, "emulator" may start an emulator with
# process name "emulator64-arm" or similar.
if name and name.startswith('emulator'):
return True
except Exception as e:
_log_debug("failed to get process name: %s" % str(e))
return False
def is_available(self):
@ -500,11 +468,6 @@ class AndroidEmulator(object):
auth_file = open(EMULATOR_AUTH_FILE, 'w')
auth_file.close()
def outputHandler(line):
self.emulator_log.write("<%s>\n" % line)
if "Invalid value for -gpu" in line or "Invalid GPU mode" in line:
self.gpu = False
env = os.environ
env['ANDROID_AVD_HOME'] = os.path.join(EMULATOR_HOME_DIR, "avd")
command = [self.emulator_path, "-avd", self.avd_info.name]
@ -517,17 +480,14 @@ class AndroidEmulator(object):
self.avd_info.extra_args.remove('-enable-kvm')
command += self.avd_info.extra_args
log_path = os.path.join(EMULATOR_HOME_DIR, 'emulator.log')
self.emulator_log = open(log_path, 'w')
self.emulator_log = open(log_path, 'w+')
_log_debug("Starting the emulator with this command: %s" %
' '.join(command))
_log_debug("Emulator output will be written to '%s'" %
log_path)
self.proc = ProcessHandler(
command, storeOutput=False, processOutputLine=outputHandler,
stdin=subprocess.PIPE, env=env, ignore_children=True)
self.proc.run()
_log_debug("Emulator started with pid %d" %
int(self.proc.proc.pid))
self.proc = subprocess.Popen(command, env=env, stdin=subprocess.PIPE,
stdout=self.emulator_log, stderr=self.emulator_log)
_log_debug("Emulator started with pid %d" % int(self.proc.pid))
def wait_for_start(self):
"""
@ -581,7 +541,16 @@ class AndroidEmulator(object):
return True
def check_completed(self):
if self.proc.proc.poll() is not None:
if self.proc.poll() is not None:
if self.gpu:
try:
for line in self.emulator_log.readlines():
if "Invalid value for -gpu" in line or "Invalid GPU mode" in line:
self.gpu = False
break
except Exception as e:
_log_warning(str(e))
if not self.gpu and not self.restarted:
_log_warning("Emulator failed to start. Your emulator may be out of date.")
_log_warning("Trying to restart the emulator without -gpu argument.")
@ -671,7 +640,7 @@ class AndroidEmulator(object):
tn.close()
if not telnet_ok:
time.sleep(10)
if self.proc.proc.poll() is not None:
if self.proc.poll() is not None:
_log_warning("Emulator has already completed!")
return False
return telnet_ok
@ -820,21 +789,14 @@ def _get_tooltool_manifest(substs, src_path, dst_path, filename):
def _tooltool_fetch():
def outputHandler(line):
_log_debug(line)
tooltool_full_path = os.path.abspath(TOOLTOOL_PATH)
command = [sys.executable, tooltool_full_path,
'fetch', '-o', '-m', 'releng.manifest']
proc = ProcessHandler(
command, processOutputLine=outputHandler, storeOutput=False,
cwd=EMULATOR_HOME_DIR)
proc.run()
try:
proc.wait()
except Exception:
if proc.poll() is None:
proc.kill(signal.SIGTERM)
response = subprocess.check_output(command, cwd=EMULATOR_HOME_DIR)
_log_debug(response)
except Exception as e:
_log_warning(str(e))
def _get_host_platform():
@ -904,10 +866,7 @@ def _verify_kvm(substs):
emulator_path = 'emulator'
command = [emulator_path, '-accel-check']
try:
p = ProcessHandler(command, storeOutput=True)
p.run()
p.wait()
out = p.output
out = subprocess.check_output(command)
if 'is installed and usable' in ''.join(out):
return
except Exception as e:

View File

@ -59,14 +59,26 @@ class Device(object):
"""
remote_dump_dir = posixpath.join(self.app_ctx.remote_profile, 'minidumps')
local_dump_dir = tempfile.mkdtemp()
if not self.device.is_dir(remote_dump_dir):
# This may be a hint that something went wrong during browser
# start-up if (MOZ_CRASHREPORTER=1)
print("WARNING: No crash directory {} found on remote device".format(remote_dump_dir))
try:
self.device.pull(remote_dump_dir, local_dump_dir)
except ADBError as e:
# OK if directory not present -- sometimes called before browser start
if 'does not exist' not in str(e):
raise
try:
shutil.rmtree(local_dump_dir)
except Exception:
pass
finally:
raise e
else:
print("WARNING: {}".format(e))
if os.listdir(local_dump_dir):
self.device.rm(remote_dump_dir, recursive=True)
self.device.mkdir(remote_dump_dir)
return local_dump_dir
def setup_profile(self, profile):

View File

@ -85,7 +85,7 @@ def _raw_log():
def test_environment(xrePath, env=None, crashreporter=True, debugger=False,
lsanPath=None, ubsanPath=None, log=None):
useLSan=False, log=None):
"""
populate OS environment variables for mochitest and reftests.
@ -182,20 +182,12 @@ def test_environment(xrePath, env=None, crashreporter=True, debugger=False,
else:
message = message % 'default memory'
if lsanPath:
if useLSan:
log.info("LSan enabled.")
asanOptions.append('detect_leaks=1')
lsanOptions = ["exitcode=0"]
# Uncomment out the next line to report the addresses of leaked objects.
# lsanOptions.append("report_objects=1")
suppressionsFile = os.path.join(
lsanPath, 'lsan_suppressions.txt')
if os.path.exists(suppressionsFile):
log.info("LSan using suppression file " + suppressionsFile)
lsanOptions.append("suppressions=" + suppressionsFile)
else:
log.info("WARNING | runtests.py | LSan suppressions file"
" does not exist! " + suppressionsFile)
env["LSAN_OPTIONS"] = ':'.join(lsanOptions)
if len(asanOptions):
@ -224,18 +216,7 @@ def test_environment(xrePath, env=None, crashreporter=True, debugger=False,
ubsan = bool(mozinfo.info.get("ubsan"))
if ubsan and (mozinfo.isLinux or mozinfo.isMac):
if ubsanPath:
log.info("UBSan enabled.")
ubsanOptions = []
suppressionsFile = os.path.join(
ubsanPath, 'ubsan_suppressions.txt')
if os.path.exists(suppressionsFile):
log.info("UBSan using suppression file " + suppressionsFile)
ubsanOptions.append("suppressions=" + suppressionsFile)
else:
log.info("WARNING | runtests.py | UBSan suppressions file"
" does not exist! " + suppressionsFile)
env["UBSAN_OPTIONS"] = ':'.join(ubsanOptions)
log.info("UBSan enabled.")
return env

View File

@ -13,5 +13,5 @@ config = {
'install',
'run-tests',
],
"output_directory": "/sdcard",
"output_directory": "/sdcard/pgo_profile",
}

View File

@ -21,30 +21,6 @@ config = {
{'filename': '/builds/mozilla-fennec-geoloc-api.key',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/mozilla-fennec-geoloc-api.key',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk.token',
'min_scm_level': 2, 'default-file': '{abs_src_dir}/mobile/android/base/adjust-sdk-sandbox.token'},
{'filename': '/builds/adjust-sdk-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk-beta.token',
'min_scm_level': 2, 'default-file': '{abs_src_dir}/mobile/android/base/adjust-sdk-sandbox.token'},
{'filename': '/builds/leanplum-sdk-release.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/leanplum-sdk-release.token',
'min_scm_level': 2, 'default-file': '{abs_src_dir}/mobile/android/base/leanplum-sdk-sandbox.token'},
{'filename': '/builds/leanplum-sdk-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/leanplum-sdk-beta.token',
'min_scm_level': 2, 'default-file': '{abs_src_dir}/mobile/android/base/leanplum-sdk-sandbox.token'},
{'filename': '/builds/leanplum-sdk-nightly.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/leanplum-sdk-nightly.token',
'min_scm_level': 2, 'default-file': '{abs_src_dir}/mobile/android/base/leanplum-sdk-sandbox.token'},
{'filename': '/builds/pocket-api-release.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/pocket-api-release.token',
'min_scm_level': 2, 'default-file': '{abs_src_dir}/mobile/android/base/pocket-api-sandbox.token'},
{'filename': '/builds/pocket-api-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/pocket-api-beta.token',
'min_scm_level': 2, 'default-file': '{abs_src_dir}/mobile/android/base/pocket-api-sandbox.token'},
{'filename': '/builds/pocket-api-nightly.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/pocket-api-nightly.token',
'min_scm_level': 2, 'default-file': '{abs_src_dir}/mobile/android/base/pocket-api-sandbox.token'},
],
'vcs_share_base': '/builds/hg-shared',
'objdir': 'obj-firefox',
@ -73,14 +49,8 @@ config = {
'MINIDUMP_SAVE_PATH': '%(base_work_dir)s/minidumps',
},
'src_mozconfig': 'mobile/android/config/mozconfigs/android/nightly',
# Bug 1583594: GeckoView doesn't (yet) produce have a package file
# from which to extract package metrics.
'disable_package_metrics': True,
#########################################################################
# It's not obvious, but postflight_build is after packaging, so the Gecko
# binaries are in the object directory, ready to be packaged into the
# GeckoView AAR.
'postflight_build_mach_commands': [
['android',
'archive-geckoview',
],
],
}

View File

@ -12,7 +12,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'app_ini_path': '%(obj_dir)s/dist/bin/application.ini',
'secret_files': [
@ -25,12 +24,6 @@ config = {
{'filename': '/builds/mozilla-desktop-geoloc-api.key',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk.token',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk-beta.token',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
],
'vcs_share_base': '/builds/hg-shared',
#########################################################################

View File

@ -11,7 +11,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'app_ini_path': '%(obj_dir)s/dist/bin/application.ini',
'secret_files': [
@ -24,12 +23,6 @@ config = {
{'filename': '/builds/mozilla-desktop-geoloc-api.key',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk.token',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk-beta.token',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
],
'vcs_share_base': '/builds/hg-shared',
#########################################################################

View File

@ -19,12 +19,6 @@ config = {
{'filename': '/builds/mozilla-desktop-geoloc-api.key',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk.token',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk-beta.token',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
],
'vcs_share_base': '/builds/hg-shared',
#########################################################################

View File

@ -19,12 +19,6 @@ config = {
{'filename': '/builds/mozilla-desktop-geoloc-api.key',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk.token',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk-beta.token',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
],
'vcs_share_base': '/builds/hg-shared',
#########################################################################

View File

@ -4,9 +4,6 @@ config = {
'multi_locale_config_platform': 'android',
'debug_build': True,
'postflight_build_mach_commands': [
['android',
'archive-geckoview',
],
['android',
'archive-coverage-artifacts',
],

View File

@ -3,7 +3,6 @@ config = {
'src_mozconfig': 'mobile/android/config/mozconfigs/android-api-16-gradle-dependencies/nightly',
'multi_locale_config_platform': 'android',
# gradle-dependencies doesn't produce a package. So don't collect package metrics.
'disable_package_metrics': True,
'postflight_build_mach_commands': [
['android',
'gradle-dependencies',

View File

@ -1,5 +0,0 @@
config = {
'stage_platform': 'android-api-16',
'src_mozconfig': 'mobile/android/config/mozconfigs/android-api-16/nightly-without-google-play-services',
'multi_locale_config_platform': 'android',
}

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'stage_platform': 'linux64-asan',
#### 64 bit build specific #####

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'stage_platform': 'linux64-asan-debug',
'debug_build': True,

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
'valgrind-test',
],
'stage_platform': 'linux64-valgrind',

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'stage_platform': 'macosx64-debug',
'debug_build': True,

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'stage_platform': 'win32-add-on-devel',
#### 32 bit build specific #####

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'stage_platform': 'win32-debug',
'debug_build': True,

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'stage_platform': 'win64-add-on-devel',
#### 64 bit build specific #####

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'clobber',
'build',
'check-test',
],
'stage_platform': 'win64-debug',
'debug_build': True,

View File

@ -4,7 +4,6 @@ config = {
'default_actions': [
'get-secrets',
'build',
'check-test',
],
'app_ini_path': '%(obj_dir)s/dist/bin/application.ini',
'vcs_share_base': os.path.join('y:', os.sep, 'hg-shared'),

View File

@ -22,12 +22,6 @@ config = {
for f in ["debug", "nightly"]
] + [
# File, from, to
("browser/confvars.sh",
"ACCEPTED_MAR_CHANNEL_IDS=firefox-mozilla-beta,firefox-mozilla-release",
"ACCEPTED_MAR_CHANNEL_IDS=firefox-mozilla-release"),
("browser/confvars.sh",
"MAR_CHANNEL_ID=firefox-mozilla-beta",
"MAR_CHANNEL_ID=firefox-mozilla-release"),
],
"vcs_share_base": os.path.join(ABS_WORK_DIR, 'hg-shared'),

View File

@ -19,14 +19,6 @@ config = {
"browser/config/mozconfigs/win64/l10n-mozconfig",
"browser/config/mozconfigs/win64-aarch64/l10n-mozconfig",
"browser/config/mozconfigs/macosx64/l10n-mozconfig"]
] + [
# File, from, to
("browser/confvars.sh",
"ACCEPTED_MAR_CHANNEL_IDS=firefox-mozilla-central",
"ACCEPTED_MAR_CHANNEL_IDS=firefox-mozilla-beta,firefox-mozilla-release"),
("browser/confvars.sh",
"MAR_CHANNEL_ID=firefox-mozilla-central",
"MAR_CHANNEL_ID=firefox-mozilla-beta"),
] + [
# File, from, to
("build/mozconfig.common",

View File

@ -5,8 +5,9 @@ platform = "win32"
config = {
"repack_id": os.environ.get("REPACK_ID"),
# ToolTool
'tooltool_cache': os.environ.get('TOOLTOOL_CACHE'),
'run_configure': False,
'env': {
'PATH': "%(abs_input_dir)s/upx/bin:%(PATH)s",
}
}

View File

@ -5,9 +5,9 @@ platform = "win32"
config = {
"locale": os.environ.get("LOCALE"),
# ToolTool
'tooltool_url': 'https://tooltool.mozilla-releng.net/',
'tooltool_cache': os.environ.get('TOOLTOOL_CACHE'),
'run_configure': False,
'env': {
'PATH': "%(abs_input_dir)s/upx/bin:%(PATH)s",
}
}

View File

@ -5,8 +5,9 @@ platform = "win64"
config = {
"repack_id": os.environ.get("REPACK_ID"),
# ToolTool
'tooltool_cache': os.environ.get('TOOLTOOL_CACHE'),
'run_configure': False,
'env': {
'PATH': "%(abs_input_dir)s/upx/bin:%(PATH)s",
}
}

View File

@ -5,8 +5,9 @@ platform = "win64"
config = {
"locale": os.environ.get("LOCALE"),
# ToolTool
'tooltool_cache': os.environ.get('TOOLTOOL_CACHE'),
'run_configure': False,
'env': {
'PATH': "%(abs_input_dir)s/upx/bin:%(PATH)s",
}
}

View File

@ -29,30 +29,5 @@ config = {
{'filename': '/builds/mozilla-fennec-geoloc-api.key',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/mozilla-fennec-geoloc-api.key',
'min_scm_level': 2, 'default': 'try-build-has-no-secrets'},
{'filename': '/builds/adjust-sdk.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk.token',
'min_scm_level': 2, 'default-file': '{abs_mozilla_dir}/mobile/android/base/adjust-sdk-sandbox.token'},
{'filename': '/builds/adjust-sdk-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/adjust-sdk-beta.token',
'min_scm_level': 2, 'default-file': '{abs_mozilla_dir}/mobile/android/base/adjust-sdk-sandbox.token'},
{'filename': '/builds/leanplum-sdk-release.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/leanplum-sdk-release.token',
'min_scm_level': 2, 'default-file': '{abs_mozilla_dir}/mobile/android/base/leanplum-sdk-sandbox.token'},
{'filename': '/builds/leanplum-sdk-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/leanplum-sdk-beta.token',
'min_scm_level': 2, 'default-file': '{abs_mozilla_dir}/mobile/android/base/leanplum-sdk-sandbox.token'},
{'filename': '/builds/leanplum-sdk-nightly.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/leanplum-sdk-nightly.token',
'min_scm_level': 2, 'default-file': '{abs_mozilla_dir}/mobile/android/base/leanplum-sdk-sandbox.token'},
{'filename': '/builds/pocket-api-release.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/pocket-api-release.token',
'min_scm_level': 2, 'default-file': '{abs_mozilla_dir}/mobile/android/base/pocket-api-sandbox.token'},
{'filename': '/builds/pocket-api-beta.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/pocket-api-beta.token',
'min_scm_level': 2, 'default-file': '{abs_mozilla_dir}/mobile/android/base/pocket-api-sandbox.token'},
{'filename': '/builds/pocket-api-nightly.token',
'secret_name': 'project/releng/gecko/build/level-%(scm-level)s/pocket-api-nightly.token',
'min_scm_level': 2, 'default-file': '{abs_mozilla_dir}/mobile/android/base/pocket-api-sandbox.token'},
],
}

View File

@ -30,7 +30,6 @@ config = {
"populate-webroot",
"create-virtualenv",
"install",
"setup-mitmproxy",
"run-tests",
],
"minidump_stackwalk_path": MINIDUMP_STACKWALK_PATH,

View File

@ -20,7 +20,6 @@ config = {
"populate-webroot",
"create-virtualenv",
"install",
"setup-mitmproxy",
"run-tests",
],
"minidump_stackwalk_path": MINIDUMP_STACKWALK_PATH,

View File

@ -13,7 +13,6 @@ config = {
"populate-webroot",
"create-virtualenv",
"install",
"setup-mitmproxy",
"run-tests",
],
"run_cmd_checks_enabled": True,

View File

@ -22,18 +22,9 @@ config = {
"populate-webroot",
"create-virtualenv",
"install",
"setup-mitmproxy",
"run-tests",
],
"tooltool_cache": os.path.join('c:\\', 'build', 'tooltool_cache'),
"minidump_stackwalk_path": "win32-minidump_stackwalk.exe",
"minidump_tooltool_manifest_path": "config/tooltool-manifests/win32/releng.manifest",
"python3_manifest": {
"win32": "python3.manifest",
"win64": "python3_x64.manifest",
},
"env": {
# python3 requires C runtime, found in firefox installation; see bug 1361732
"PATH": "%(PATH)s;c:\\slave\\test\\build\\application\\firefox;"
}
"minidump_tooltool_manifest_path": "config/tooltool-manifests/win32/releng.manifest"
}

View File

@ -19,18 +19,9 @@ config = {
"populate-webroot",
"create-virtualenv",
"install",
"setup-mitmproxy",
"run-tests",
],
"tooltool_cache": os.path.join('Y:\\', 'tooltool-cache'),
"minidump_stackwalk_path": "win32-minidump_stackwalk.exe",
"minidump_tooltool_manifest_path": "config/tooltool-manifests/win32/releng.manifest",
"python3_manifest": {
"win32": "python3.manifest",
"win64": "python3_x64.manifest",
},
"env": {
# python3 requires C runtime, found in firefox installation; see bug 1361732
"PATH": "%(PATH)s;%(CD)s\\build\\application\\firefox;"
}
"minidump_tooltool_manifest_path": "config/tooltool-manifests/win32/releng.manifest"
}

View File

@ -21,18 +21,9 @@ config = {
"populate-webroot",
"create-virtualenv",
"install",
"setup-mitmproxy",
"run-tests",
],
"tooltool_cache": os.path.join('c:\\', 'build', 'tooltool_cache'),
"minidump_stackwalk_path": "win32-minidump_stackwalk.exe",
"minidump_tooltool_manifest_path": "config/tooltool-manifests/win32/releng.manifest",
"python3_manifest": {
"win32": "python3.manifest",
"win64": "python3_x64.manifest",
},
"env": {
# python3 requires C runtime, found in firefox installation; see bug 1361732
"PATH": "%(PATH)s;c:\\slave\\test\\build\\application\\firefox;"
}
"minidump_tooltool_manifest_path": "config/tooltool-manifests/win32/releng.manifest"
}

View File

@ -14,7 +14,7 @@ config = {
"--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
"--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
"--certutil-binary=%(xre_path)s/certutil",
"--product=fennec",
"--product=firefox_android",
],
"avds_dir": "/builds/worker/workspace/build/.android",
"binary_path": "/tmp",

View File

@ -40,8 +40,6 @@ from mozharness.mozilla.automation import (
TBPL_WORST_LEVEL_TUPLE,
)
from mozharness.mozilla.secrets import SecretsMixin
from mozharness.mozilla.testing.errors import TinderBoxPrintRe
from mozharness.mozilla.testing.unittest import tbox_print_summary
from mozharness.base.python import (
PerfherderResourceOptionsMixin,
VirtualenvMixin,
@ -102,69 +100,6 @@ class MakeUploadOutputParser(OutputParser):
self.info(line)
class CheckTestCompleteParser(OutputParser):
tbpl_error_list = TBPL_UPLOAD_ERRORS
def __init__(self, **kwargs):
self.matches = {}
super(CheckTestCompleteParser, self).__init__(**kwargs)
self.pass_count = 0
self.fail_count = 0
self.leaked = False
self.harness_err_re = TinderBoxPrintRe['harness_error']['full_regex']
self.tbpl_status = TBPL_SUCCESS
def parse_single_line(self, line):
# Counts and flags.
# Regular expression for crash and leak detections.
if "TEST-PASS" in line:
self.pass_count += 1
return self.info(line)
if "TEST-UNEXPECTED-" in line:
# Set the error flags.
# Or set the failure count.
m = self.harness_err_re.match(line)
if m:
r = m.group(1)
if r == "missing output line for total leaks!":
self.leaked = None
else:
self.leaked = True
self.fail_count += 1
return self.warning(line)
self.info(line) # else
def evaluate_parser(self, return_code, success_codes=None):
success_codes = success_codes or [0]
if self.num_errors: # ran into a script error
self.tbpl_status = self.worst_level(TBPL_FAILURE, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
if self.fail_count > 0:
self.tbpl_status = self.worst_level(TBPL_WARNING, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
# Account for the possibility that no test summary was output.
if (self.pass_count == 0 and self.fail_count == 0 and
os.environ.get('TRY_SELECTOR') != 'coverage'):
self.error('No tests run or test summary not found')
self.tbpl_status = self.worst_level(TBPL_WARNING, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
if return_code not in success_codes:
self.tbpl_status = self.worst_level(TBPL_FAILURE, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
# Print the summary.
summary = tbox_print_summary(self.pass_count,
self.fail_count,
self.leaked)
self.info("TinderboxPrint: check<br/>%s\n" % summary)
return self.tbpl_status
class MozconfigPathError(Exception):
"""
There was an error getting a mozconfig path from a mozharness config.
@ -358,8 +293,6 @@ class BuildOptionParser(object):
'api-16-debug-searchfox': 'builds/releng_sub_%s_configs/%s_api_16_debug_searchfox.py',
'api-16-gradle': 'builds/releng_sub_%s_configs/%s_api_16_gradle.py',
'api-16-profile-generate': 'builds/releng_sub_%s_configs/%s_api_16_profile_generate.py',
'api-16-without-google-play-services':
'builds/releng_sub_%s_configs/%s_api_16_without_google_play_services.py',
'rusttests': 'builds/releng_sub_%s_configs/%s_rusttests.py',
'rusttests-debug': 'builds/releng_sub_%s_configs/%s_rusttests_debug.py',
'x86': 'builds/releng_sub_%s_configs/%s_x86.py',
@ -558,19 +491,6 @@ BUILD_BASE_CONFIG_OPTIONS = [
"type": "string",
"dest": "branch",
"help": "This sets the branch we will be building this for."}],
[['--scm-level'], {
"action": "store",
"type": "int",
"dest": "scm_level",
"default": 1,
"help": "This sets the SCM level for the branch being built."
" See https://www.mozilla.org/en-US/about/"
"governance/policies/commit/access-policy/"}],
[['--enable-pgo'], {
"action": "store_true",
"dest": "pgo_build",
"default": False,
"help": "Sets the build to run in PGO mode"}],
[['--enable-nightly'], {
"action": "store_true",
"dest": "nightly_build",
@ -756,9 +676,6 @@ items from that key's value."
if c.get('version_file'):
env['MOZ_VERSION_FILE'] = c['version_file']
if self.config.get('pgo_build') or self._compile_against_pgo():
env['MOZ_PGO'] = '1'
return env
def query_mach_build_env(self, multiLocale=None):
@ -778,34 +695,6 @@ items from that key's value."
'en-US')
return mach_env
def _compile_against_pgo(self):
"""determines whether a build should be run with pgo even if it is
not a classified as a 'pgo build'.
requirements:
1) must be a platform that can run against pgo
2) must be a nightly build
"""
c = self.config
if self.stage_platform in c['pgo_platforms']:
if self.query_is_nightly():
return True
return False
def query_check_test_env(self):
c = self.config
dirs = self.query_abs_dirs()
check_test_env = {}
if c.get('check_test_env'):
for env_var, env_value in c['check_test_env'].iteritems():
check_test_env[env_var] = env_value % dirs
# Check tests don't upload anything, however our mozconfigs depend on
# UPLOAD_PATH, so we prevent configure from re-running by keeping the
# environments consistent.
if c.get('upload_env'):
check_test_env.update(c['upload_env'])
return check_test_env
def _rm_old_package(self):
"""rm the old package."""
c = self.config
@ -1173,41 +1062,6 @@ items from that key's value."
env=env, output_timeout=60*45, halt_on_failure=True,
)
def check_test(self):
if os.environ.get('USE_ARTIFACT'):
self.info('Skipping due to forced artifact build.')
return
c = self.config
dirs = self.query_abs_dirs()
env = self.query_build_env()
env.update(self.query_check_test_env())
cmd = self._query_mach() + [
'--log-no-times',
'build',
'-v',
'--keep-going',
'check',
]
parser = CheckTestCompleteParser(config=c,
log_obj=self.log_obj)
return_code = self.run_command(command=cmd,
cwd=dirs['abs_src_dir'],
env=env,
output_parser=parser)
tbpl_status = parser.evaluate_parser(return_code)
return_code = EXIT_STATUS_DICT[tbpl_status]
if return_code:
self.return_code = self.worst_level(
return_code, self.return_code,
AUTOMATION_EXIT_CODES[::-1]
)
self.error("'mach build check' did not run successfully. Please "
"check log for errors.")
def _is_configuration_shipped(self):
"""Determine if the current build configuration is shipped to users.
@ -1220,10 +1074,6 @@ items from that key's value."
# configs we need to be reset and we don't like requiring boilerplate
# in derived configs.
# All PGO builds are shipped. This takes care of Linux and Windows.
if self.config.get('pgo_build'):
return True
# Debug builds are never shipped.
if self.config.get('debug_build'):
return False
@ -1297,7 +1147,6 @@ items from that key's value."
yield {
'name': 'sccache hit rate',
'value': hits,
'extraOptions': self.perfherder_resource_options(),
'subtests': [],
'lowerIsBetter': False
}
@ -1305,7 +1154,6 @@ items from that key's value."
yield {
'name': 'sccache cache_write_errors',
'value': stats['stats']['cache_write_errors'],
'extraOptions': self.perfherder_resource_options(),
'alertThreshold': 50.0,
'subtests': [],
}
@ -1313,7 +1161,6 @@ items from that key's value."
yield {
'name': 'sccache requests_not_cacheable',
'value': stats['stats']['requests_not_cacheable'],
'extraOptions': self.perfherder_resource_options(),
'alertThreshold': 50.0,
'subtests': [],
}
@ -1410,8 +1257,8 @@ items from that key's value."
Returns a dictionary of sections and their sizes.
"""
# Check for `rust_size`, our cross platform version of size. It should
# be installed by tooltool in $abs_src_dir/rust-size/rust-size
rust_size = os.path.join(self.query_abs_dirs()['abs_src_dir'],
# be fetched by run-task in $MOZ_FETCHES_DIR/rust-size/rust-size
rust_size = os.path.join(os.environ['MOZ_FETCHES_DIR'],
'rust-size', 'rust-size')
size_prog = self.which(rust_size)
if not size_prog:
@ -1604,7 +1451,8 @@ items from that key's value."
'''If sccache was in use for this build, shut down the sccache server.'''
if os.environ.get('USE_SCCACHE') == '1':
topsrcdir = self.query_abs_dirs()['abs_src_dir']
sccache = os.path.join(topsrcdir, 'sccache2', 'sccache')
sccache_base = os.environ['MOZ_FETCHES_DIR']
sccache = os.path.join(sccache_base, 'sccache', 'sccache')
if self._is_windows():
sccache += '.exe'
self.run_command([sccache, '--stop-server'], cwd=topsrcdir)

View File

@ -121,7 +121,7 @@ TestPassed = [
{'regex': re.compile('''(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )'''), 'level': INFO},
]
HarnessErrorList = [
BaseHarnessErrorList = [
{'substr': 'TEST-UNEXPECTED', 'level': ERROR, },
{'substr': 'PROCESS-CRASH', 'level': ERROR, },
{'regex': re.compile('''ERROR: (Address|Leak)Sanitizer'''), 'level': ERROR, },
@ -130,6 +130,13 @@ HarnessErrorList = [
{'substr': 'Pure virtual function called!', 'level': ERROR, },
]
HarnessErrorList = BaseHarnessErrorList + [
{'substr': 'A content process crashed', 'level': ERROR, },
]
# wpt can have expected crashes so we can't always turn treeherder orange in those cases
WptHarnessErrorList = BaseHarnessErrorList
LogcatErrorList = [
{'substr': 'Fatal signal 11 (SIGSEGV)', 'level': ERROR,
'explanation': 'This usually indicates the B2G process has crashed'},

View File

@ -161,7 +161,7 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
"action": "store_true",
"dest": "enable_webrender",
"default": False,
"help": "Tries to enable the WebRender compositor.",
"help": "Enable the WebRender compositor in Gecko.",
}],
[["--setpref"], {
"action": "append",
@ -179,7 +179,6 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
'populate-webroot',
'create-virtualenv',
'install',
'setup-mitmproxy',
'run-tests',
])
kwargs.setdefault('default_actions', ['clobber',
@ -187,7 +186,6 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
'populate-webroot',
'create-virtualenv',
'install',
'setup-mitmproxy',
'run-tests',
])
kwargs.setdefault('config', {})
@ -209,14 +207,6 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
self.gecko_profile_interval = self.config.get('gecko_profile_interval')
self.pagesets_name = None
self.benchmark_zip = None
# some platforms download a mitmproxy release binary
self.mitmproxy_rel_bin = None
# zip file found on tooltool that contains all of the mitmproxy recordings
self.mitmproxy_recording_set = None
# files inside the recording set
self.mitmproxy_recordings_file_list = self.config.get('mitmproxy', None)
# path to mitdump tool itself, in py3 venv
self.mitmdump = None
# We accept some configuration options from the try commit message in the format
# mozharness: <options>
@ -275,19 +265,6 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
self.benchmark_zip_manifest = 'jetstream-benchmark.manifest'
return self.benchmark_zip
def query_mitmproxy_recordings_file_list(self):
""" When using mitmproxy we also need the name of the playback files that are included
inside the playback archive.
"""
if self.mitmproxy_recordings_file_list:
return self.mitmproxy_recordings_file_list
if self.query_talos_json_config() and self.suite is not None:
talos_opts = self.talos_json_config['suites'][self.suite].get('talos_options', None)
for index, val in enumerate(talos_opts):
if val == '--mitmproxy':
self.mitmproxy_recordings_file_list = talos_opts[index + 1]
return self.mitmproxy_recordings_file_list
def get_suite_from_test(self):
""" Retrieve the talos suite name from a given talos test name."""
# running locally, single test name provided instead of suite; go through tests and
@ -338,18 +315,7 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
kw_options['title'] = self.config['title']
if self.symbols_path:
kw_options['symbolsPath'] = self.symbols_path
# if using mitmproxy, we've already created a py3 venv just
# for it; need to add the path to that env/mitdump tool
if self.mitmdump:
kw_options['mitmdumpPath'] = self.mitmdump
# also need to have recordings list; get again here from talos.json, in case talos was
# invoked via '-a' and therefore the --mitmproxy param wasn't used on command line
if not self.config.get('mitmproxy', None):
file_list = self.query_mitmproxy_recordings_file_list()
if file_list is not None:
kw_options['mitmproxy'] = file_list
else:
self.fatal("Talos requires list of mitmproxy playback files, use --mitmproxy")
kw_options.update(kw)
# talos expects tests to be in the format (e.g.) 'ts:tp5:tsvg'
tests = kw_options.get('activeTests')
@ -369,6 +335,9 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
options.extend(['--code-coverage'])
if self.config['extra_prefs']:
options.extend(['--setpref={}'.format(p) for p in self.config['extra_prefs']])
if self.config['enable_webrender']:
options.extend(['--enable-webrender'])
return options
def populate_webroot(self):
@ -453,131 +422,6 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
except Exception:
self.critical("Error copying webkit benchmarks from %s to %s" % (src, dest))
def setup_mitmproxy(self):
"""Some talos tests require the use of mitmproxy to playback the pages,
set it up here.
"""
if not self.query_mitmproxy_recording_set():
self.info("Skipping: mitmproxy is not required")
return
os_name = self.platform_name()
# on windows we need to install a pytyon 3 virtual env; on macosx and linux we
# use a mitmdump pre-built binary that doesn't need an external python 3
if 'win' in os_name:
# setup python 3.x virtualenv
self.setup_py3_virtualenv()
# install mitmproxy
self.install_mitmproxy()
# download the recording set; will be overridden by the --no-download
if ('talos_extra_options' in self.config and
'--no-download' not in self.config['talos_extra_options']) or \
'talos_extra_options' not in self.config:
self.download_mitmproxy_recording_set()
else:
self.info("Not downloading mitmproxy recording set because no-download was specified")
def setup_py3_virtualenv(self):
"""Mitmproxy needs Python 3.x; set up a separate py 3.x env here"""
self.info("Setting up python 3.x virtualenv, required for mitmproxy")
# first download the py3 package
self.py3_path = self.fetch_python3()
# now create the py3 venv
self.py3_venv_configuration(python_path=self.py3_path, venv_path='py3venv')
self.py3_create_venv()
self.py3_install_modules(["cffi==1.10.0"])
requirements = [os.path.join(self.talos_path, 'talos',
'mitmproxy', 'mitmproxy_requirements.txt')]
self.py3_install_requirement_files(requirements)
# add py3 executables path to system path
sys.path.insert(1, self.py3_path_to_executables())
def install_mitmproxy(self):
"""Install the mitmproxy tool into the Python 3.x env"""
self.info("Installing mitmproxy")
if 'win' in self.platform_name():
self.py3_install_modules(modules=['mitmproxy'])
self.mitmdump = os.path.join(self.py3_path_to_executables(), 'mitmdump')
else:
# on macosx and linux64 we use a prebuilt mitmproxy release binary
mitmproxy_path = os.path.join(self.talos_path, 'talos', 'mitmproxy')
self.mitmdump = os.path.join(mitmproxy_path, 'mitmdump')
if not os.path.exists(self.mitmdump):
# download the mitmproxy release binary; will be overridden by the --no-download
if ('talos_extra_options' in self.config and
'--no-download' not in self.config['talos_extra_options']) or \
'talos_extra_options' not in self.config:
if 'osx' in self.platform_name():
_platform = 'osx'
else:
_platform = 'linux64'
self.query_mitmproxy_rel_bin(_platform)
if self.mitmproxy_rel_bin is None:
self.fatal("Aborting: mitmproxy_release_bin_osx not found in talos.json")
self.download_mitmproxy_binary(_platform)
else:
self.info("Not downloading mitmproxy rel binary because no-download was "
"specified")
self.info('The mitmdump macosx binary is found at: %s' % self.mitmdump)
self.run_command([self.mitmdump, '--version'], env=self.query_env())
def query_mitmproxy_rel_bin(self, platform):
"""Mitmproxy requires external playback archives to be downloaded and extracted"""
if self.mitmproxy_rel_bin:
return self.mitmproxy_rel_bin
if self.query_talos_json_config() and self.suite is not None:
config_key = "mitmproxy_release_bin_" + platform
self.mitmproxy_rel_bin = self.talos_json_config['suites'][self.suite].get(config_key,
False)
return self.mitmproxy_rel_bin
def download_mitmproxy_binary(self, platform):
"""Download the mitmproxy release binary from tooltool"""
self.info("Downloading the mitmproxy release binary using tooltool")
dest = os.path.join(self.talos_path, 'talos', 'mitmproxy')
_manifest = "mitmproxy-rel-bin-%s.manifest" % platform
manifest_file = os.path.join(self.talos_path, 'talos', 'mitmproxy', _manifest)
if platform in ['osx', 'linux64']:
self.tooltool_fetch(
manifest_file,
output_dir=dest,
cache=self.config.get('tooltool_cache')
)
archive = os.path.join(dest, self.mitmproxy_rel_bin)
tar = self.query_exe('tar')
unzip_cmd = [tar, '-xvzf', archive, '-C', dest]
self.run_command(unzip_cmd, halt_on_failure=True)
def query_mitmproxy_recording_set(self):
"""Mitmproxy requires external playback archives to be downloaded and extracted"""
if self.mitmproxy_recording_set:
return self.mitmproxy_recording_set
if self.query_talos_json_config() and self.suite is not None:
self.mitmproxy_recording_set = (
self.talos_json_config['suites'][self.suite].get('mitmproxy_recording_set', False))
return self.mitmproxy_recording_set
def download_mitmproxy_recording_set(self):
"""Download the set of mitmproxy recording files that will be played back"""
self.info("Downloading the mitmproxy recording set using tooltool")
dest = os.path.join(self.talos_path, 'talos', 'mitmproxy')
manifest_file = os.path.join(self.talos_path, 'talos',
'mitmproxy', 'mitmproxy-playback-set.manifest')
self.tooltool_fetch(
manifest_file,
output_dir=dest,
cache=self.config.get('tooltool_cache')
)
archive = os.path.join(dest, self.mitmproxy_recording_set)
unzip = self.query_exe('unzip')
unzip_cmd = [unzip, '-q', '-o', archive, '-d', dest]
self.run_command(unzip_cmd, halt_on_failure=True)
# Action methods. {{{1
# clobber defined in BaseScript
@ -710,18 +554,11 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
else:
env['PYTHONPATH'] = self.talos_path
# mitmproxy needs path to mozharness when installing the cert
env['SCRIPTSPATH'] = scripts_path
if self.repo_path is not None:
env['MOZ_DEVELOPER_REPO_DIR'] = self.repo_path
if self.obj_path is not None:
env['MOZ_DEVELOPER_OBJ_DIR'] = self.obj_path
if self.config['enable_webrender']:
env['MOZ_WEBRENDER'] = '1'
env['MOZ_ACCELERATED'] = '1'
# TODO: consider getting rid of this as we should be default to stylo now
env['STYLO_FORCE_ENABLED'] = '1'
@ -782,20 +619,3 @@ class Talos(TestingMixin, MercurialScript, TooltoolMixin,
self.record_status(parser.worst_tbpl_status,
level=parser.worst_log_level)
def fetch_python3(self):
manifest_file = os.path.join(
self.talos_path,
'talos',
'mitmproxy',
self.config.get('python3_manifest')[self.platform_name()])
output_dir = self.query_abs_dirs()['abs_work_dir']
# Slowdown: The unzipped Python3 installation gets deleted every time
self.tooltool_fetch(
manifest_file,
output_dir=output_dir,
cache=self.config.get('tooltool_cache')
)
python3_path = os.path.join(output_dir, 'python3.6', 'python')
self.run_command([python3_path, '--version'], env=self.query_env())
return python3_path

View File

@ -8,6 +8,7 @@
import copy
import json
import time
import glob
import os
import sys
import posixpath
@ -148,7 +149,7 @@ class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin,
"""
from mozhttpd import MozHttpd
from mozprofile import Preferences
from mozdevice import ADBDevice, ADBProcessError, ADBTimeoutError
from mozdevice import ADBDevice, ADBTimeoutError
from six import string_types
from marionette_driver.marionette import Marionette
@ -201,9 +202,9 @@ class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin,
# bool pref.
prefs["browser.tabs.remote.autostart"] = False
outputdir = self.config.get('output_directory', '/sdcard')
outputdir = self.config.get('output_directory', '/sdcard/pgo_profile')
jarlog = posixpath.join(outputdir, 'en-US.log')
profdata = posixpath.join(outputdir, 'default.profraw')
profdata = posixpath.join(outputdir, 'default_%p_random_%m.profraw')
env = {}
env["XPCOM_DEBUG_BREAK"] = "warn"
@ -213,6 +214,7 @@ class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin,
adbdevice = ADBDevice(adb=adb,
device='emulator-5554')
adbdevice.mkdir(outputdir)
try:
# Run Fennec a first time to initialize its profile
@ -220,7 +222,7 @@ class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin,
app='fennec',
package_name=app,
adb_path=adb,
bin="target.apk",
bin="geckoview-androidTest.apk",
prefs=prefs,
connect_to_running_emulator=True,
startup_timeout=1000,
@ -256,38 +258,27 @@ class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin,
# There is a delay between execute_script() returning and the profile data
# actually getting written out, so poll the device until we get a profile.
for i in range(50):
try:
localprof = '/builds/worker/workspace/default.profraw'
adbdevice.pull(profdata, localprof)
stats = os.stat(localprof)
if stats.st_size == 0:
# The file may not have been fully written yet, so retry until we
# get actual data.
time.sleep(2)
else:
break
except ADBProcessError:
# The file may not exist at all yet, which would raise an
# ADBProcessError, so retry.
time.sleep(2)
if not adbdevice.process_exist(app):
break
time.sleep(2)
else:
raise Exception("Unable to pull default.profraw")
adbdevice.pull(jarlog, '/builds/worker/workspace/en-US.log')
raise Exception("Android App (%s) never quit" % app)
# Pull all the profraw files and en-US.log
adbdevice.pull(outputdir, '/builds/worker/workspace/')
except ADBTimeoutError:
self.fatal('INFRA-ERROR: Failed with an ADBTimeoutError',
EXIT_STATUS_DICT[TBPL_RETRY])
# We normally merge as part of a GENERATED_FILES step in the profile-use
# build, but Android runs sometimes result in a truncated profile. We do
# a merge here to make sure the data isn't corrupt so we can retry the
# 'run' task if necessary.
profraw_files = glob.glob('/builds/worker/workspace/*.profraw')
if not profraw_files:
self.fatal('Could not find any profraw files in /builds/worker/workspace')
merge_cmd = [
'/builds/worker/workspace/build/clang/bin/llvm-profdata',
os.path.join(os.environ['MOZ_FETCHES_DIR'], 'clang/bin/llvm-profdata'),
'merge',
'/builds/worker/workspace/default.profraw',
'-o',
'/builds/worker/workspace/merged.profraw',
]
'/builds/worker/workspace/merged.profdata',
] + profraw_files
rc = subprocess.call(merge_cmd)
if rc != 0:
self.fatal('INFRA-ERROR: Failed to merge profile data. Corrupt profile?',
@ -299,7 +290,7 @@ class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin,
'-acvf',
'/builds/worker/artifacts/profdata.tar.xz',
'-C', '/builds/worker/workspace',
'merged.profraw',
'merged.profdata',
'en-US.log',
]
subprocess.check_call(tar_cmd)

View File

@ -64,15 +64,6 @@ class DesktopSingleLocale(LocalesMixin, AutomationMixin,
"dest": "en_us_installer_url",
"type": "string",
"help": "Specify the url of the en-us binary"}
], [
['--scm-level'], { # Ignored on desktop for now: see Bug 1414678.
"action": "store",
"type": "int",
"dest": "scm_level",
"default": 1,
"help": "This sets the SCM level for the branch being built."
" See https://www.mozilla.org/en-US/about/"
"governance/policies/commit/access-policy/"}
]]
def __init__(self, require_config_file=True):

View File

@ -84,7 +84,6 @@ class DesktopPartnerRepacks(AutomationMixin, BaseScript, VirtualenvMixin, Secret
if os.getenv('UPSTREAM_TASKIDS'):
self.info('Overriding taskIds with %s' % os.getenv('UPSTREAM_TASKIDS'))
self.config['taskIds'] = os.getenv('UPSTREAM_TASKIDS').split()
self.config['scm_level'] = os.environ.get('MOZ_SCM_LEVEL', '1')
if 'version' not in self.config:
self.fatal("Version (-v) not supplied.")

View File

@ -35,7 +35,6 @@ class FxDesktopBuild(BuildScript, TryToolsMixin, object):
'clobber',
'build',
'static-analysis-autotest',
'check-test',
'valgrind-test',
'multi-l10n',
'package-source',
@ -44,9 +43,7 @@ class FxDesktopBuild(BuildScript, TryToolsMixin, object):
# Default configuration
'config': {
'is_automation': True,
"pgo_build": False,
"debug_build": False,
"pgo_platforms": ['linux', 'linux64', 'win32', 'win64'],
# nightly stuff
"nightly_build": False,
# hg tool stuff

View File

@ -50,15 +50,6 @@ class MobileSingleLocale(LocalesMixin, TooltoolMixin, AutomationMixin,
"type": "string",
"help": "Override the tags set for all repos"
}
], [
['--scm-level'],
{"action": "store",
"type": "int",
"dest": "scm_level",
"default": 1,
"help": "This sets the SCM level for the branch being built."
" See https://www.mozilla.org/en-US/about/"
"governance/policies/commit/access-policy/"}
]]
def __init__(self, require_config_file=True):

View File

@ -81,10 +81,6 @@ class OpenH264Build(TransferMixin, VCSScript, TooltoolMixin):
"action": "store_true",
"default": False,
}],
[["--scm-level"], {
"dest": "scm_level",
"help": "dummy option",
}],
[["--branch"], {
"dest": "branch",
"help": "dummy option",

View File

@ -37,10 +37,6 @@ class ChecksumsGenerator(BaseScript, VirtualenvMixin):
"type": int,
"help": "Number of checksums file to download concurrently",
}],
[["--scm-level"], {
"dest": "scm_level",
"help": "dummy option",
}],
[["--branch"], {
"dest": "branch",
"help": "dummy option",

View File

@ -93,6 +93,7 @@ class Repackage(BaseScript):
command=command,
cwd=dirs['abs_mozilla_dir'],
halt_on_failure=True,
env=self.query_env(),
)
def _run_tooltool(self):

View File

@ -26,7 +26,7 @@ from mozharness.mozilla.testing.codecoverage import (
CodeCoverageMixin,
code_coverage_config_options
)
from mozharness.mozilla.testing.errors import HarnessErrorList
from mozharness.mozilla.testing.errors import WptHarnessErrorList
from mozharness.mozilla.structuredlog import StructuredOutputParser
from mozharness.base.log import INFO
@ -66,7 +66,7 @@ class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidM
"action": "store_true",
"dest": "enable_webrender",
"default": False,
"help": "Tries to enable the WebRender compositor."}
"help": "Enable the WebRender compositor in Gecko."}
],
[["--headless"], {
"action": "store_true",
@ -246,6 +246,8 @@ class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidM
if not c["e10s"]:
cmd.append("--disable-e10s")
if c["enable_webrender"]:
cmd.append("--enable-webrender")
if c["single_stylo_traversal"]:
cmd.append("--stylo-threads=1")
@ -342,7 +344,7 @@ class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidM
parser = StructuredOutputParser(config=self.config,
log_obj=self.log_obj,
log_compact=True,
error_list=BaseErrorList + HarnessErrorList,
error_list=BaseErrorList + WptHarnessErrorList,
allow_crashes=True)
env = {'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']}
@ -350,9 +352,6 @@ class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidM
if self.config['allow_software_gl_layers']:
env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
if self.config['enable_webrender']:
env['MOZ_WEBRENDER'] = '1'
env['MOZ_ACCELERATED'] = '1'
if self.config['headless']:
env['MOZ_HEADLESS'] = '1'
env['MOZ_HEADLESS_WIDTH'] = self.config['headless_width']

View File

@ -42,20 +42,18 @@ def main():
assert windows_sdk_dir is not None, 'WINDOWSSDKDIR should be in MOZ_CONFIGURE_OPTIONS'
ignore_dir_abs = os.path.dirname(windows_sdk_dir)
else:
gcc_dir = os.path.join(buildconfig.topsrcdir, 'gcc')
ignore_dir_abs = gcc_dir
# globs passed to grcov must exist and must be relative to the source directory.
# If it doesn't exist, maybe it has moved and we need to update the paths above.
# If it is no longer relative to the source directory, we no longer need to ignore it and
# this code can be removed.
assert os.path.isdir(ignore_dir_abs), '{} is not a directory'.format(ignore_dir_abs)
assert ignore_dir_abs.startswith(buildconfig.topsrcdir), '{} should start with {}'.format(ignore_dir_abs, buildconfig.topsrcdir)
# globs passed to grcov must exist and must be relative to the source directory.
# If it doesn't exist, maybe it has moved and we need to update the paths above.
# If it is no longer relative to the source directory, we no longer need to ignore it and
# this code can be removed.
assert os.path.isdir(ignore_dir_abs), '{} is not a directory'.format(ignore_dir_abs)
assert ignore_dir_abs.startswith(buildconfig.topsrcdir), '{} should start with {}'.format(ignore_dir_abs, buildconfig.topsrcdir)
grcov_command += ['--ignore-dir', os.path.relpath(ignore_dir_abs, buildconfig.topsrcdir) + '*']
grcov_command += ['--ignore-dir', os.path.relpath(ignore_dir_abs, buildconfig.topsrcdir) + '*']
if buildconfig.substs['OS_TARGET'] == 'Linux':
gcc_dir = os.path.join(os.environ['MOZ_FETCHES_DIR'], 'gcc')
if 'LD_LIBRARY_PATH' in os.environ:
os.environ['LD_LIBRARY_PATH'] = '{}/lib64/:{}'.format(gcc_dir, os.environ['LD_LIBRARY_PATH'])
else:

View File

@ -262,10 +262,16 @@ def extract_unittests_from_args(args, environ, manifest_path):
for test in active_tests
])
# skip non-existing tests
tests = [test for test in tests if os.path.isfile(test[0])]
# skip and warn for any tests in the manifest that are not found
final_tests = []
log = mozlog.get_default_logger()
for test in tests:
if os.path.isfile(test[0]):
final_tests.append(test)
else:
log.warning("test file not found: %s - skipped" % test[0])
return tests
return final_tests
def update_mozinfo():

View File

@ -1638,9 +1638,7 @@ SpecialPowersAPI.prototype = {
// XXX: these APIs really ought to be removed, they're not e10s-safe.
// (also they're pretty Firefox-specific)
_getTopChromeWindow(window) {
return window.docShell.rootTreeItem.domWindow.QueryInterface(
Ci.nsIDOMChromeWindow
);
return window.docShell.rootTreeItem.domWindow;
},
_getAutoCompletePopup(window) {
return this._getTopChromeWindow(window).document.getElementById(

View File

@ -51,7 +51,6 @@ class TalosRunner(MozbuildObject):
default_actions = ['populate-webroot']
default_actions.extend([
'create-virtualenv',
'setup-mitmproxy',
'run-tests',
])
self.config = {

View File

@ -68,42 +68,8 @@
"\"c:/Program Files (x86)/Windows Kits/10/Windows Performance Toolkit/xperf.exe\""
]
},
"tp6": {
"tests": ["tp6_youtube", "tp6_amazon", "tp6_facebook"],
"mitmproxy_release_bin_osx": "mitmproxy-2.0.2-osx.tar.gz",
"mitmproxy_release_bin_linux64": "mitmproxy-2.0.2-linux.tar.gz",
"mitmproxy_recording_set": "mitmproxy-recording-set-win10.zip",
"talos_options": [
"--mitmproxy",
"mitmproxy-recording-google.mp mitmproxy-recording-youtube.mp mitmproxy-recording-amazon.mp mitmproxy-recording-facebook.mp",
"--firstNonBlankPaint"
]
},
"tp6-stylo-threads": {
"tests": ["tp6_youtube", "tp6_amazon", "tp6_facebook"],
"mitmproxy_release_bin_osx": "mitmproxy-2.0.2-osx.tar.gz",
"mitmproxy_release_bin_linux64": "mitmproxy-2.0.2-linux.tar.gz",
"mitmproxy_recording_set": "mitmproxy-recording-set-win10.zip",
"talos_options": [
"--stylo-threads=1",
"--mitmproxy",
"mitmproxy-recording-google.mp mitmproxy-recording-youtube.mp mitmproxy-recording-amazon.mp mitmproxy-recording-facebook.mp",
"--firstNonBlankPaint"
]
},
"h1": {
"tests": ["ts_paint_heavy"]
},
"h2": {
"tests": ["tp6_google_heavy", "tp6_youtube_heavy", "tp6_amazon_heavy", "tp6_facebook_heavy"],
"mitmproxy_release_bin_osx": "mitmproxy-2.0.2-osx.tar.gz",
"mitmproxy_release_bin_linux64": "mitmproxy-2.0.2-linux.tar.gz",
"mitmproxy_recording_set": "mitmproxy-recording-set-win10.zip",
"talos_options": [
"--mitmproxy",
"mitmproxy-recording-google.mp mitmproxy-recording-youtube.mp mitmproxy-recording-amazon.mp mitmproxy-recording-facebook.mp",
"--firstNonBlankPaint"
]
}
}
}

View File

@ -122,11 +122,6 @@ def create_parser(mach_interface=False):
add_arg('--setpref', action='append', default=[], dest="extraPrefs",
metavar="PREF=VALUE",
help="defines an extra user preference")
add_arg('--mitmproxy',
help='Test uses mitmproxy to serve the pages, specify the '
'path and name of the mitmdump file to playback')
add_arg('--mitmdumpPath',
help="Path to mitmproxy's mitmdump playback tool")
add_arg("--firstNonBlankPaint", action='store_true', dest="fnbpaint",
help="Wait for firstNonBlankPaint event before recording the time")
add_arg('--webServer', dest='webserver',
@ -187,6 +182,8 @@ def create_parser(mach_interface=False):
help='Remove any existing ccov gcda output files after browser'
' initialization but before starting the tests. NOTE:'
' Currently only supported in production.')
add_arg('--enable-webrender', action="store_true", default=False,
help="Enable the WebRender compositor in Gecko.")
add_logging_group(parser)
return parser

View File

@ -294,6 +294,7 @@ def get_browser_config(config):
'debugger': None,
'debugger_args': None,
'develop': False,
'enable_webrender': False,
'process': '',
'framework': 'talos',
'repository': None,

View File

@ -1,186 +0,0 @@
# This file was copied from mitmproxy/addons/serverplayback.py release tag 2.0.2 and modified by
# Benjamin Smedberg
# Altered features:
# * --kill returns 404 rather than dropping the whole HTTP/2 connection on the floor
# * best-match response handling is used to improve success rates
from __future__ import absolute_import, print_function
import hashlib
import sys
import urllib
from collections import defaultdict
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy import io
from typing import Any # noqa
from typing import List # noqa
class ServerPlayback:
    """mitmproxy addon that replays recorded HTTP flows.

    Copied from mitmproxy/addons/serverplayback.py release tag 2.0.2 and
    modified so that --kill returns a 404 rather than dropping the whole
    HTTP/2 connection on the floor, and so a scoring-based best-match
    lookup is used to improve replay success rates.
    """

    def __init__(self, replayfiles):
        # Populated later by configure() when mitmproxy hands us its options.
        self.options = None
        self.replayfiles = replayfiles
        # request hash -> list of recorded flows sharing that hash
        self.flowmap = {}

    def load(self, flows):
        """Index every recorded flow that carries a response."""
        for i in flows:
            if i.response:
                bucket = self.flowmap.setdefault(self._hash(i.request), [])
                bucket.append(i)

    def clear(self):
        """Forget all indexed flows."""
        self.flowmap = {}

    def _parse(self, r):
        """
        Return (path, queries, formdata, content) for a request.

        queries maps each query key to the list of its values.  formdata is
        the multipart or urlencoded form when one is present; otherwise
        content is the raw body (None when the body is empty).
        """
        _, _, path, _, query, _ = urllib.parse.urlparse(r.url)
        queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)
        queries = defaultdict(list)
        for k, v in queriesArray:
            queries[k].append(v)
        content = None
        formdata = None
        if r.raw_content != b'':
            if r.multipart_form:
                formdata = r.multipart_form
            elif r.urlencoded_form:
                formdata = r.urlencoded_form
            else:
                content = r.content
        return (path, queries, formdata, content)

    def _hash(self, r):
        """
        Calculates a loose hash of the flow request.

        Only port/scheme/method/path contribute (plus host unless
        server_replay_ignore_host is set, and a "?" marker when a query
        string is present), so similar requests share a bucket and are
        disambiguated later by _match().
        """
        path, queries, _, _ = self._parse(r)
        key = [str(r.port), str(r.scheme), str(r.method), str(path)]  # type: List[Any]
        if not self.options.server_replay_ignore_host:
            key.append(r.host)
        if queries:
            key.append("?")
        return hashlib.sha256(
            repr(key).encode("utf8", "surrogateescape")
        ).digest()

    def _match(self, request_a, request_b):
        """
        Calculate a match score between two requests.
        Match algorithm:
          * identical query keys: 3 points
          * matching query param present: 1 point
          * matching query param value: 3 points
          * identical form keys: 3 points
          * matching form param present: 1 point
          * matching form param value: 3 points
          * matching body (no multipart or encoded form): 4 points
        """
        match = 0
        path_a, queries_a, form_a, content_a = self._parse(request_a)
        path_b, queries_b, form_b, content_b = self._parse(request_b)
        keys_a = set(queries_a.keys())
        keys_b = set(queries_b.keys())
        if keys_a == keys_b:
            match += 3
        for key in keys_a:
            values_a = set(queries_a[key])
            values_b = set(queries_b[key])
            if len(values_a) == len(values_b):
                match += 1
            if values_a == values_b:
                match += 3
        if form_a and form_b:
            keys_a = set(form_a.keys())
            keys_b = set(form_b.keys())
            if keys_a == keys_b:
                match += 3
            for key in keys_a:
                values_a = set(form_a.get_all(key))
                values_b = set(form_b.get_all(key))
                if len(values_a) == len(values_b):
                    match += 1
                if values_a == values_b:
                    match += 3
        elif content_a and (content_a == content_b):
            match += 4
        return match

    def next_flow(self, request):
        """
        Returns the next flow object, or None if no matching flow was
        found.
        """
        hsh = self._hash(request)
        flows = self.flowmap.get(hsh, None)
        if flows is None:
            return None
        # if it's an exact match, great!
        if len(flows) == 1:
            candidate = flows[0]
            if (candidate.request.url == request.url and
                    candidate.request.raw_content == request.raw_content):
                ctx.log.info("For request {} found exact replay match".format(request.url))
                return candidate
        # find the best match between the request and the available flow candidates
        match = -1
        flow = None
        ctx.log.debug("Candidate flows for request: {}".format(request.url))
        for candidate_flow in flows:
            candidate_match = self._match(candidate_flow.request, request)
            ctx.log.debug("  score={} url={}".format(candidate_match, candidate_flow.request.url))
            if candidate_match > match:
                match = candidate_match
                flow = candidate_flow
        ctx.log.info("For request {} best match {} with score=={}".format(request.url,
                     flow.request.url, match))
        # BUG FIX: return the best-scoring flow; the original returned
        # `candidate_flow`, i.e. whichever flow happened to be iterated last.
        return flow

    def configure(self, options, updated):
        """mitmproxy hook: (re)load the replay files whenever options change."""
        self.options = options
        self.clear()
        try:
            flows = io.read_flows_from_paths(self.replayfiles)
        except exceptions.FlowReadException as e:
            raise exceptions.OptionsError(str(e))
        self.load(flows)

    def request(self, f):
        """mitmproxy hook: answer each request from the recorded flows.

        When nothing matches and replay_kill_extra (--kill) is set, reply
        with a 404 instead of dropping the connection.
        """
        if self.flowmap:
            rflow = self.next_flow(f.request)
            if rflow:
                response = rflow.response.copy()
                response.is_replay = True
                if self.options.refresh_server_playback:
                    response.refresh()
                f.response = response
            elif self.options.replay_kill_extra:
                ctx.log.warn(
                    "server_playback: killed non-replay request {}".format(
                        f.request.url
                    )
                )
                f.response = http.HTTPResponse.make(404, b'', {'content-type': 'text/plain'})
def start():
    """Entry point used by mitmdump: build the ServerPlayback addon.

    Every command-line argument after the script name is treated as the
    path of a recorded flow file to replay.
    """
    paths = sys.argv[1:]
    print("Replaying from files: {}".format(paths))
    return ServerPlayback(paths)

View File

@ -1,9 +0,0 @@
[
{
"filename": "mitmproxy-recording-set-win10.zip",
"size": 9189938,
"digest": "e904917ed6bf1cef7201284385dc603a283e8e22f992876f17edcf0f1f20db95b609f0d8c7f593b4a0a6c20957dcb6a4d502c562ed74fb6cf4bc255c2f691f32",
"algorithm": "sha512",
"unpack": false
}
]

View File

@ -1,9 +0,0 @@
[
{
"filename": "mitmproxy-2.0.2-linux.tar.gz",
"size": 48997542,
"digest": "b032e04b8763206a19f80b78062efa59dc901ad32fd8d6cf2d20e22744711352da61e75d93a0d93d645179153534f72a154f73432837db415c9b0cd9d981f012",
"algorithm": "sha512",
"unpack": false
}
]

View File

@ -1,9 +0,0 @@
[
{
"filename": "mitmproxy-2.0.2-osx.tar.gz",
"size": 32324573,
"digest": "06423c76e7e99fd9705eae3dc6e2423b1ffb8c42caa98fd010d59dc6ed1f0827376e238c48108106da558444b826e085a58aeb30cf9c79e9d0122a2cb17ae8e6",
"algorithm": "sha512",
"unpack": false
}
]

View File

@ -1,197 +0,0 @@
'''This helps loading mitmproxy's cert and change proxy settings for Firefox.'''
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import os
import subprocess
import sys
import time
import mozinfo
import psutil
from mozlog import get_proxy_logger
# Directory containing this helper; used to locate the playback addon script.
here = os.path.dirname(os.path.realpath(__file__))
LOG = get_proxy_logger()

# path for mitmproxy certificate, generated auto after mitmdump is started
# on local machine it is 'HOME', however it is different on production machines
# (when HOME is unset, os.path.join(None, ...) raises and we fall back to the
# Windows-style HOMEDRIVE/HOMEPATH combination)
try:
    DEFAULT_CERT_PATH = os.path.join(os.getenv('HOME'),
                                     '.mitmproxy', 'mitmproxy-ca-cert.cer')
except Exception:
    DEFAULT_CERT_PATH = os.path.join(os.getenv('HOMEDRIVE'), os.getenv('HOMEPATH'),
                                     '.mitmproxy', 'mitmproxy-ca-cert.cer')

# to install mitmproxy certificate into Firefox and turn on/off proxy
# Firefox enterprise-policies JSON template; %(cert)s is interpolated with
# the CA certificate path before it is written to policies.json.
POLICIES_CONTENT_ON = '''{
  "policies": {
    "Certificates": {
      "Install": ["%(cert)s"]
    },
    "Proxy": {
      "Mode": "manual",
      "HTTPProxy": "127.0.0.1:8080",
      "SSLProxy": "127.0.0.1:8080",
      "Passthrough": "",
      "Locked": true
    }
  }
}'''

# Policies JSON that turns the proxy back off after the test run.
POLICIES_CONTENT_OFF = '''{
  "policies": {
    "Proxy": {
      "Mode": "none",
      "Locked": false
    }
  }
}'''
def install_mitmproxy_cert(mitmproxy_proc, browser_path):
    """Install the CA certificate generated by mitmproxy, into Firefox.

    1. Create a directory called distribution in the same directory as the Firefox executable
    2. Create a file called policies.json with:
    {
        "policies": {
            "Certificates": {
                "Install": ["FULL_PATH_TO_CERT"]
            }
        }
    }

    :param mitmproxy_proc: Popen handle for the running mitmdump process;
        it is stopped if the certificate install cannot be verified.
    :param browser_path: path to the Firefox executable.

    Aborts the whole run via sys.exit() when verification fails.
    """
    LOG.info("Installing mitmproxy CA certficate into Firefox")
    # browser_path is the exe, we want the folder
    policies_dir = os.path.dirname(browser_path)
    # on macosx we need to remove the last folders 'MacOS'
    # and the policies json needs to go in ../Content/Resources/
    if 'mac' in mozinfo.os:
        policies_dir = os.path.join(policies_dir[:-6], "Resources")
    # for all platforms the policies json goes in a 'distribution' dir
    policies_dir = os.path.join(policies_dir, "distribution")
    cert_path = DEFAULT_CERT_PATH
    # for windows only: double the backslashes so the path survives JSON parsing
    if mozinfo.os == 'win':
        cert_path = cert_path.replace('\\', '\\\\')
    if not os.path.exists(policies_dir):
        LOG.info("creating folder: %s" % policies_dir)
        os.makedirs(policies_dir)
    else:
        LOG.info("folder already exists: %s" % policies_dir)
    write_policies_json(policies_dir,
                        policies_content=POLICIES_CONTENT_ON %
                        {'cert': cert_path})
    # cannot continue if failed to add CA cert to Firefox, need to check
    if not is_mitmproxy_cert_installed(policies_dir):
        LOG.error('Aborting: failed to install mitmproxy CA cert into Firefox')
        stop_mitmproxy_playback(mitmproxy_proc)
        # NOTE(review): sys.exit() with no argument exits with status 0 even
        # though this is a failure path -- confirm the harness does not rely
        # on a non-zero exit code here.
        sys.exit()
def write_policies_json(location, policies_content):
    """Write the given policy text to a policies.json file under location."""
    target = os.path.join(location, "policies.json")
    LOG.info("writing: %s" % target)
    with open(target, 'w') as handle:
        handle.write(policies_content)
def read_policies_json(location):
    """Return the full text of the policies.json file under location."""
    source = os.path.join(location, "policies.json")
    LOG.info("reading: %s" % source)
    with open(source, 'r') as handle:
        return handle.read()
def is_mitmproxy_cert_installed(policies_dir):
    """Verify the mitmproxy CA cert was added to Firefox.

    Reads back the policies.json written by install_mitmproxy_cert() and
    checks it contains the exact policy text we wrote.

    :param policies_dir: the Firefox 'distribution' directory.
    :return: True when the expected policy content is present, else False.
    """
    try:
        # read autoconfig file, confirm mitmproxy cert is in there
        contents = read_policies_json(policies_dir)
        LOG.info("Firefox policies file contents:")
        LOG.info(contents)
        cert_path = DEFAULT_CERT_PATH
        # for windows only: must match the escaping used when writing the file
        if mozinfo.os == 'win':
            cert_path = cert_path.replace('\\', '\\\\')
        # exact substring match against the same template that was written out
        if (POLICIES_CONTENT_ON % {
                'cert': cert_path}) in contents:
            LOG.info("Verified mitmproxy CA certificate is installed in Firefox")
        else:
            return False
    except Exception as e:
        # NOTE(review): broad catch -- any read or formatting error is
        # reported as "not installed" rather than raised.
        LOG.info("failed to read Firefox policies file, exeption: %s" % e)
        return False
    return True
def start_mitmproxy_playback(mitmdump_path,
                             mitmproxy_recording_path,
                             mitmproxy_recordings_list,
                             browser_path):
    """Startup mitmproxy and replay the specified flow file.

    :param mitmdump_path: path to the mitmdump executable.
    :param mitmproxy_recording_path: directory holding the recordings.
    :param mitmproxy_recordings_list: iterable of recording file names.
    :param browser_path: path to the Firefox executable; its directory is
        added to PATH so mitmdump can find libraries it needs.
    :return: the Popen handle for the running mitmdump process.

    Aborts the whole run via sys.exit() if mitmdump dies within 10 seconds.
    """
    mitmproxy_recordings = []
    # recording names can be provided in comma-separated list; build py list including path
    for recording in mitmproxy_recordings_list:
        mitmproxy_recordings.append(os.path.join(mitmproxy_recording_path, recording))

    # cmd line to start mitmproxy playback using custom playback script is as follows:
    # <path>/mitmdump -s "<path>mitmdump-alternate-server-replay/alternate-server-replay.py
    # <path>recording-1.mp <path>recording-2.mp..."
    param = os.path.join(here, 'alternate-server-replay.py')
    env = os.environ.copy()

    # this part is platform-specific
    if mozinfo.os == 'win':
        # NOTE(review): the doubled quotes and tripled backslashes are needed
        # so the script path plus recording list survive Windows argument
        # parsing as a single -s value -- confirm before changing.
        param2 = '""' + param.replace('\\', '\\\\\\') + ' ' + \
                 ' '.join(mitmproxy_recordings).replace('\\', '\\\\\\') + '""'
        sys.path.insert(1, mitmdump_path)
        # mitmproxy needs some DLL's that are a part of Firefox itself, so add to path
        env["PATH"] = os.path.dirname(browser_path) + ";" + env["PATH"]
    else:
        # mac and linux
        param2 = param + ' ' + ' '.join(mitmproxy_recordings)
        env["PATH"] = os.path.dirname(browser_path)

    command = [mitmdump_path, '-k', '-s', param2]

    LOG.info("Starting mitmproxy playback using env path: %s" % env["PATH"])
    LOG.info("Starting mitmproxy playback using command: %s" % ' '.join(command))
    # to turn off mitmproxy log output, use these params for Popen:
    # Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    mitmproxy_proc = subprocess.Popen(command, env=env)
    # give mitmdump ten seconds to either start serving or crash
    time.sleep(10)
    data = mitmproxy_proc.poll()
    # poll() returns None while the process is still alive
    if data is None:
        LOG.info("Mitmproxy playback successfully started as pid %d" % mitmproxy_proc.pid)
        return mitmproxy_proc
    # cannot continue as we won't be able to playback the pages
    LOG.error('Aborting: mitmproxy playback process failed to start, poll returned: %s' % data)
    sys.exit()
def stop_mitmproxy_playback(mitmproxy_proc):
    """Stop the mitmproxy server playback.

    :param mitmproxy_proc: Popen handle returned by start_mitmproxy_playback.
    """
    LOG.info("Stopping mitmproxy playback, killing process %d" % mitmproxy_proc.pid)
    if mozinfo.os == 'win':
        mitmproxy_proc.kill()
    else:
        mitmproxy_proc.terminate()
    # give the process time to exit before checking whether it is gone
    time.sleep(10)
    if mitmproxy_proc.pid in psutil.pids():
        # I *think* we can still continue, as process will be automatically
        # killed anyway when mozharness is done (?) if not, we won't be able
        # to startup mitmproxy next time if it is already running
        LOG.error("Failed to kill the mitmproxy playback process")
    else:
        LOG.info("Successfully killed the mitmproxy playback process")

View File

@ -1,35 +0,0 @@
argh==0.26.2
asn1crypto==0.22.0
blinker==1.4
pycparser==2.17
cffi==1.10.0
brotlipy==0.6.0
certifi==2017.4.17
click==6.7
construct==2.8.12
cryptography==2.1.4
cssutils==1.0.2
EditorConfig==0.12.1
h2==2.6.2
hpack==3.0.0
html2text==2016.9.19
hyperframe==4.0.2
idna==2.5
jsbeautifier==1.6.12
kaitaistruct==0.6
mitmproxy==2.0.2
packaging==16.8
passlib==1.7.1
pathtools==0.1.2
pyasn1==0.2.3
pyOpenSSL==16.2.0
pyparsing==2.2.0
pyperclip==1.5.27
PyYAML==3.12
requests==2.13.0
ruamel.yaml==0.13.14
six==1.10.0
sortedcontainers==1.5.7
tornado==4.4.3
urwid==1.3.1
watchdog==0.8.3

View File

@ -1,10 +0,0 @@
[
{
"size": 15380470,
"visibility": "public",
"digest": "cd78b88d95b69bef99d7192b71dd34118700f44db0a0069a13bfd4943b131e8d7fdac83859f8ac15d873d4b329eef69d8d75d0a6746d06fdcfc5d06da0c9784c",
"algorithm": "sha512",
"unpack": true,
"filename": "python3.6.zip"
}
]

View File

@ -1,10 +0,0 @@
[
{
"size": 16026760,
"visibility": "public",
"digest": "379428e3955671213a245ccd9ccf6f9d17d368db68c02da8baed7be629f2691127cd3e3f86807b25e2098d9840083fdc07946ab1bed0c14db4a5b628a47ed9ef",
"algorithm": "sha512",
"unpack": true,
"filename": "python3.6.amd64.zip"
}
]

View File

@ -157,7 +157,7 @@ class Output(object):
# responsiveness has its own metric, not the mean
# TODO: consider doing this for all counters
if 'responsiveness' is name:
if 'responsiveness' == name:
subtest = {
'name': name,
'value': filter.responsiveness_Metric(vals)

View File

@ -20,7 +20,6 @@ import mozversion
from talos import utils
from mozlog import get_proxy_logger
from talos.config import get_configs, ConfigurationError
from talos.mitmproxy import mitmproxy
from talos.results import TalosResults
from talos.ttest import TTest
from talos.utils import TalosError, TalosRegression
@ -224,29 +223,6 @@ def run_tests(config, browser_config):
if config['gecko_profile']:
talos_results.add_extra_option('geckoProfile')
# some tests use mitmproxy to playback pages
mitmproxy_recordings_list = config.get('mitmproxy', False)
if mitmproxy_recordings_list is not False:
# needed so can tell talos ttest to allow external connections
browser_config['mitmproxy'] = True
# start mitmproxy playback; this also generates the CA certificate
mitmdump_path = config.get('mitmdumpPath', False)
if mitmdump_path is False:
# cannot continue, need path for mitmdump playback tool
raise TalosError('Aborting: mitmdumpPath not provided on cmd line but is required')
mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
mitmproxy_proc = mitmproxy.start_mitmproxy_playback(mitmdump_path,
mitmproxy_recording_path,
mitmproxy_recordings_list.split(),
browser_config['browser_path'])
# install the generated CA certificate into Firefox
# mitmproxy cert setup needs path to mozharness install; mozharness has set this
mitmproxy.install_mitmproxy_cert(mitmproxy_proc,
browser_config['browser_path'])
testname = None
# run the tests
@ -321,10 +297,6 @@ def run_tests(config, browser_config):
LOG.info("Completed test suite (%s)" % timer.elapsed())
# if mitmproxy was used for page playback, stop it
if mitmproxy_recordings_list is not False:
mitmproxy.stop_mitmproxy_playback(mitmproxy_proc)
# output results
if results_urls and not browser_config['no_upload_results']:
talos_results.output(results_urls)

View File

@ -894,72 +894,6 @@ class perf_reftest_singletons(PageloaderTest):
alert_threshold = 5.0
@register_test()
class tp6_google(QuantumPageloadTest):
"""
Quantum Pageload Test - Google
"""
tpmanifest = '${talos}/tests/quantum_pageload/quantum_pageload_google.manifest'
fnbpaint = False
tphero = True
@register_test()
class tp6_google_heavy(tp6_google):
"""
tp6_google test ran against a heavy-user profile
"""
profile = 'simple'
@register_test()
class tp6_youtube(QuantumPageloadTest):
"""
Quantum Pageload Test - YouTube
"""
tpmanifest = '${talos}/tests/quantum_pageload/quantum_pageload_youtube.manifest'
@register_test()
class tp6_youtube_heavy(tp6_youtube):
"""
tp6_youtube test ran against a heavy-user profile
"""
profile = 'simple'
@register_test()
class tp6_amazon(QuantumPageloadTest):
"""
Quantum Pageload Test - Amazon
"""
tpmanifest = '${talos}/tests/quantum_pageload/quantum_pageload_amazon.manifest'
@register_test()
class tp6_amazon_heavy(tp6_amazon):
"""
tp6_amazon test ran against a heavy-user profile
"""
profile = 'simple'
@register_test()
class tp6_facebook(QuantumPageloadTest):
"""
Quantum Pageload Test - Facebook
"""
tpmanifest = '${talos}/tests/quantum_pageload/quantum_pageload_facebook.manifest'
@register_test()
class tp6_facebook_heavy(tp6_facebook):
"""
tp6_facebook test ran against a heavy-user profile
"""
profile = 'simple'
@register_test()
class displaylist_mutate(PageloaderTest):
"""

View File

@ -10,3 +10,4 @@ To add a test:
iii) The test steps.
iv) A call to perf_finish().
3) Add your test to the perf_reftest_singletons.manifest file.
4) Add your test to the list in build/pgo/index.html.

View File

@ -1,19 +1,13 @@
% http://localhost/tests/perf-reftest-singletons/bloom-basic.html
# When modifying this list, please also update build/pgo/index.html.
% http://localhost/tests/perf-reftest-singletons/abspos-reflow-1.html
% http://localhost/tests/perf-reftest-singletons/bidi-resolution-1.html
% http://localhost/tests/perf-reftest-singletons/bloom-basic-2.html
% http://localhost/tests/perf-reftest-singletons/style-sharing.html
% http://localhost/tests/perf-reftest-singletons/style-sharing-style-attr.html
% http://localhost/tests/perf-reftest-singletons/display-none-1.html
% http://localhost/tests/perf-reftest-singletons/only-children-1.html
% http://localhost/tests/perf-reftest-singletons/slow-selector-1.html
% http://localhost/tests/perf-reftest-singletons/slow-selector-2.html
% http://localhost/tests/perf-reftest-singletons/style-attr-1.html
% http://localhost/tests/perf-reftest-singletons/bloom-basic.html
% http://localhost/tests/perf-reftest-singletons/coalesce-1.html
% http://localhost/tests/perf-reftest-singletons/coalesce-2.html
% http://localhost/tests/perf-reftest-singletons/parent-basic-singleton.html
% http://localhost/tests/perf-reftest-singletons/tiny-traversal-singleton.html
% http://localhost/tests/perf-reftest-singletons/nth-index-1.html
% http://localhost/tests/perf-reftest-singletons/nth-index-2.html
% http://localhost/tests/perf-reftest-singletons/bidi-resolution-1.html
% http://localhost/tests/perf-reftest-singletons/display-none-1.html
% http://localhost/tests/perf-reftest-singletons/external-string-pass.html
% http://localhost/tests/perf-reftest-singletons/getElementById-1.html
% http://localhost/tests/perf-reftest-singletons/id-getter-1.html
% http://localhost/tests/perf-reftest-singletons/id-getter-2.html
% http://localhost/tests/perf-reftest-singletons/id-getter-3.html
@ -21,10 +15,18 @@
% http://localhost/tests/perf-reftest-singletons/id-getter-5.html
% http://localhost/tests/perf-reftest-singletons/id-getter-6.html
% http://localhost/tests/perf-reftest-singletons/id-getter-7.html
% http://localhost/tests/perf-reftest-singletons/abspos-reflow-1.html
% http://localhost/tests/perf-reftest-singletons/scrollbar-styles-1.html
% http://localhost/tests/perf-reftest-singletons/inline-style-cache-1.html
% http://localhost/tests/perf-reftest-singletons/link-style-cache-1.html
% http://localhost/tests/perf-reftest-singletons/nth-index-1.html
% http://localhost/tests/perf-reftest-singletons/nth-index-2.html
% http://localhost/tests/perf-reftest-singletons/only-children-1.html
% http://localhost/tests/perf-reftest-singletons/parent-basic-singleton.html
% http://localhost/tests/perf-reftest-singletons/scrollbar-styles-1.html
% http://localhost/tests/perf-reftest-singletons/slow-selector-1.html
% http://localhost/tests/perf-reftest-singletons/slow-selector-2.html
% http://localhost/tests/perf-reftest-singletons/style-attr-1.html
% http://localhost/tests/perf-reftest-singletons/style-sharing-style-attr.html
% http://localhost/tests/perf-reftest-singletons/style-sharing.html
% http://localhost/tests/perf-reftest-singletons/tiny-traversal-singleton.html
% http://localhost/tests/perf-reftest-singletons/window-named-property-get.html
% http://localhost/tests/perf-reftest-singletons/getElementById-1.html
% http://localhost/tests/perf-reftest-singletons/external-string-pass.html
# When modifying this list, please also update build/pgo/index.html.

View File

@ -102,6 +102,12 @@ class TTest(object):
if browser_config.get('stylothreads', 0) > 0:
setup.env['STYLO_THREADS'] = str(browser_config['stylothreads'])
if browser_config['enable_webrender']:
setup.env['MOZ_WEBRENDER'] = '1'
setup.env['MOZ_ACCELERATED'] = '1'
else:
setup.env['MOZ_WEBRENDER'] = '0'
# set url if there is one (i.e. receiving a test page, not a manifest/pageloader test)
if test_config.get('url', None) is not None:
test_config['url'] = utils.interpolate(
@ -129,11 +135,6 @@ class TTest(object):
setup.env['JSGC_DISABLE_POISONING'] = '1'
setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'
# if using mitmproxy we must allow access to 'external' sites
if browser_config.get('mitmproxy', False):
LOG.info('Using mitmproxy so setting MOZ_DISABLE_NONLOCAL_CONNECTIONS to 0')
setup.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '0'
# instantiate an object to hold test results
test_results = results.TestResults(
test_config,

View File

@ -228,15 +228,6 @@ class Test_get_config(object):
cls.argv_perf_reftest = '--activeTests perf_reftest -e /some/random/path'.split()
cls.argv_perf_reftest_singletons = \
'--activeTests perf_reftest_singletons -e /some/random/path'.split()
cls.argv_tp6_google = '--activeTests tp6_google -e /some/random/path'.split()
cls.argv_tp6_google_heavy = '--activeTests tp6_google_heavy -e /some/random/path'.split()
cls.argv_tp6_youtube = '--activeTests tp6_youtube -e /some/random/path'.split()
cls.argv_tp6_youtube_heavy = '--activeTests tp6_youtube_heavy -e /some/random/path'.split()
cls.argv_tp6_amazon = '--activeTests tp6_amazon -e /some/random/path'.split()
cls.argv_tp6_amazon_heavy = '--activeTests tp6_amazon_heavy -e /some/random/path'.split()
cls.argv_tp6_facebook = '--activeTests tp6_facebook -e /some/random/path'.split()
cls.argv_tp6_facebook_heavy = \
'--activeTests tp6_facebook_heavy -e /some/random/path'.split()
@classmethod
def teardown_class(cls):
@ -897,130 +888,6 @@ class Test_get_config(object):
assert test_config['lower_is_better'] is True
assert test_config['alert_threshold'] == 5.0
def test_tp6_google_has_expected_attributes(self):
config = get_config(self.argv_tp6_google)
test_config = config['tests'][0]
assert test_config['name'] == 'tp6_google'
assert test_config['tpcycles'] == 1
assert test_config['tppagecycles'] == 25
assert test_config['gecko_profile_interval'] == 1
assert test_config['gecko_profile_entries'] == 2000000
assert test_config['filters'] is not None
assert test_config['unit'] == 'ms'
assert test_config['lower_is_better'] is True
assert test_config['fnbpaint'] is False
assert test_config['tpmanifest'] != \
'${talos}/tests/quantum_pageload/quantum_pageload_google.manifest'
def test_tp6_google_heavy_has_expected_attributes(self):
config = get_config(self.argv_tp6_google_heavy)
test_config = config['tests'][0]
assert test_config['name'] == 'tp6_google_heavy'
assert test_config['tpcycles'] == 1
assert test_config['tppagecycles'] == 25
assert test_config['gecko_profile_interval'] == 1
assert test_config['gecko_profile_entries'] == 2000000
assert test_config['filters'] is not None
assert test_config['unit'] == 'ms'
assert test_config['lower_is_better'] is True
assert test_config['fnbpaint'] is False
assert test_config['profile'] == 'simple'
def test_tp6_youtube_has_expected_attributes(self):
config = get_config(self.argv_tp6_youtube)
test_config = config['tests'][0]
assert test_config['name'] == 'tp6_youtube'
assert test_config['tpcycles'] == 1
assert test_config['tppagecycles'] == 25
assert test_config['gecko_profile_interval'] == 1
assert test_config['gecko_profile_entries'] == 2000000
assert test_config['filters'] is not None
assert test_config['unit'] == 'ms'
assert test_config['lower_is_better'] is True
assert test_config['fnbpaint'] is True
assert test_config['tpmanifest'] != \
'${talos}/tests/quantum_pageload/quantum_pageload_youtube.manifest'
def test_tp6_youtube_heavy_has_expected_attributes(self):
config = get_config(self.argv_tp6_youtube_heavy)
test_config = config['tests'][0]
assert test_config['name'] == 'tp6_youtube_heavy'
assert test_config['tpcycles'] == 1
assert test_config['tppagecycles'] == 25
assert test_config['gecko_profile_interval'] == 1
assert test_config['gecko_profile_entries'] == 2000000
assert test_config['filters'] is not None
assert test_config['unit'] == 'ms'
assert test_config['lower_is_better'] is True
assert test_config['fnbpaint'] is True
assert test_config['profile'] == 'simple'
def test_tp6_amazon_has_expected_attributes(self):
config = get_config(self.argv_tp6_amazon)
test_config = config['tests'][0]
assert test_config['name'] == 'tp6_amazon'
assert test_config['tpcycles'] == 1
assert test_config['tppagecycles'] == 25
assert test_config['gecko_profile_interval'] == 1
assert test_config['gecko_profile_entries'] == 2000000
assert test_config['filters'] is not None
assert test_config['unit'] == 'ms'
assert test_config['lower_is_better'] is True
assert test_config['fnbpaint'] is True
assert test_config['tpmanifest'] != \
'${talos}/tests/quantum_pageload/quantum_pageload_amazon.manifest'
def test_tp6_amazon_heavy_has_expected_attributes(self):
config = get_config(self.argv_tp6_amazon_heavy)
test_config = config['tests'][0]
assert test_config['name'] == 'tp6_amazon_heavy'
assert test_config['tpcycles'] == 1
assert test_config['tppagecycles'] == 25
assert test_config['gecko_profile_interval'] == 1
assert test_config['gecko_profile_entries'] == 2000000
assert test_config['filters'] is not None
assert test_config['unit'] == 'ms'
assert test_config['lower_is_better'] is True
assert test_config['fnbpaint'] is True
assert test_config['profile'] == 'simple'
def test_tp6_facebook_has_expected_attributes(self):
config = get_config(self.argv_tp6_facebook)
test_config = config['tests'][0]
assert test_config['name'] == 'tp6_facebook'
assert test_config['tpcycles'] == 1
assert test_config['tppagecycles'] == 25
assert test_config['gecko_profile_interval'] == 1
assert test_config['gecko_profile_entries'] == 2000000
assert test_config['filters'] is not None
assert test_config['unit'] == 'ms'
assert test_config['lower_is_better'] is True
assert test_config['fnbpaint'] is True
assert test_config['tpmanifest'] != \
'${talos}/tests/quantum_pageload/quantum_pageload_facebook.manifest'
def test_tp6_facebook_heavy_has_expected_attributes(self):
config = get_config(self.argv_tp6_facebook_heavy)
test_config = config['tests'][0]
assert test_config['name'] == 'tp6_facebook_heavy'
assert test_config['tpcycles'] == 1
assert test_config['tppagecycles'] == 25
assert test_config['gecko_profile_interval'] == 1
assert test_config['gecko_profile_entries'] == 2000000
assert test_config['filters'] is not None
assert test_config['unit'] == 'ms'
assert test_config['lower_is_better'] is True
assert test_config['fnbpaint'] is True
assert test_config['profile'] == 'simple'
@mock.patch('talos.config.get_browser_config')
@mock.patch('talos.config.get_config')

View File

@ -195,7 +195,7 @@ stage-jstests: make-stage-dir
ifdef OBJCOPY
ifneq ($(OBJCOPY), :) # see build/autoconf/toolchain.m4:102 for why this is necessary
ifndef PKG_SKIP_STRIP
ifdef PKG_STRIP
STRIP_COMPILED_TESTS := 1
endif
endif

View File

@ -37,13 +37,13 @@ class WebPlatformTestsRunnerSetup(MozbuildObject):
def kwargs_common(self, kwargs):
tests_src_path = os.path.join(self._here, "tests")
if kwargs["product"] == "fennec":
# package_name may be non-fennec in the future
if kwargs["product"] == "firefox_android":
# package_name may be different in the future
package_name = kwargs["package_name"]
if not package_name:
package_name = self.substs["ANDROID_PACKAGE_NAME"]
kwargs["package_name"] = package_name = "org.mozilla.geckoview.test"
# Note that this import may fail in non-fennec trees
# Note that this import may fail in non-firefox-for-android trees
from mozrunner.devices.android_device import verify_android_device, grant_runtime_permissions
verify_android_device(self, install=True, verbose=False, xre=True, app=package_name)
@ -196,6 +196,12 @@ class WebPlatformTestsUpdater(MozbuildObject):
# pdb.post_mortem()
class WebPlatformTestsUnittestRunner(MozbuildObject):
def run(self, **kwargs):
import unittestrunner
return unittestrunner.run(self.topsrcdir, **kwargs)
def create_parser_update():
from update import updatecommandline
return updatecommandline.create_parser()
@ -232,6 +238,10 @@ def create_parser_metadata_summary():
return metasummary.create_parser()
def create_parser_metadata_merge():
import metamerge
return metamerge.get_parser()
def create_parser_serve():
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
"tests", "tools")))
@ -239,6 +249,11 @@ def create_parser_serve():
return serve.serve.get_parser()
def create_parser_unittest():
import unittestrunner
return unittestrunner.get_parser()
@CommandProvider
class MachCommands(MachCommandBase):
def setup(self):
@ -251,17 +266,16 @@ class MachCommands(MachCommandBase):
parser=create_parser_wpt)
def run_web_platform_tests(self, **params):
self.setup()
if conditions.is_android(self) and params["product"] != "fennec":
if conditions.is_android(self) and params["product"] != "firefox_android":
if params["product"] is None:
params["product"] = "fennec"
else:
raise ValueError("Must specify --product=fennec in Android environment.")
params["product"] = "firefox_android"
if "test_objects" in params:
for item in params["test_objects"]:
params["include"].append(item["name"])
del params["test_objects"]
wpt_setup = self._spawn(WebPlatformTestsRunnerSetup)
wpt_setup._mach_context = self._mach_context
wpt_runner = WebPlatformTestsRunner(wpt_setup)
if params["log_mach_screenshot"] is None:
@ -269,6 +283,9 @@ class MachCommands(MachCommandBase):
logger = wpt_runner.setup_logging(**params)
if conditions.is_android(self) and params["product"] != "firefox_android":
logger.warning("Must specify --product=firefox_android in Android environment.")
return wpt_runner.run(logger, **params)
@Command("wpt",
@ -333,3 +350,22 @@ class MachCommands(MachCommandBase):
import metasummary
wpt_setup = self._spawn(WebPlatformTestsRunnerSetup)
return metasummary.run(wpt_setup.topsrcdir, wpt_setup.topobjdir, **params)
@Command("wpt-metadata-merge",
category="testing",
parser=create_parser_metadata_merge)
def wpt_meta_merge(self, **params):
import metamerge
if params["dest"] is None:
params["dest"] = params["current"]
return metamerge.run(**params)
@Command("wpt-unittest",
category="testing",
description="Run the wpt tools and wptrunner unit tests",
parser=create_parser_unittest)
def wpt_unittest(self, **params):
self.setup()
self.virtualenv_manager.install_pip_package('tox')
runner = self._spawn(WebPlatformTestsUnittestRunner)
return 0 if runner.run(**params) else 1

View File

@ -8,7 +8,7 @@ import sys
def create_parser_wpt():
from wptrunner import wptcommandline
return wptcommandline.create_parser(["fennec", "firefox", "chrome", "edge", "servo"])
return wptcommandline.create_parser(["firefox", "firefox_android", "chrome", "edge", "servo"])
class WebPlatformTestsRunner(object):
@ -30,7 +30,7 @@ class WebPlatformTestsRunner(object):
if kwargs["product"] in ["firefox", None]:
kwargs = self.setup.kwargs_firefox(kwargs)
elif kwargs["product"] == "fennec":
elif kwargs["product"] == "firefox_android":
from wptrunner import wptcommandline
kwargs = wptcommandline.check_args(self.setup.kwargs_common(kwargs))
elif kwargs["product"] in ("chrome", "edge", "servo"):

View File

@ -0,0 +1 @@
prefs: [layout.css.image-orientation.initial-from-image:true, image.honor-orientation-metadata:true]

View File

@ -0,0 +1,2 @@
[drawImage-from-bitmap-orientation-none.tentative.html]
expected: FAIL

View File

@ -0,0 +1,2 @@
[drawImage-from-bitmap-swap-width-height-orientation-none.tentative.html]
expected: FAIL

View File

@ -0,0 +1,2 @@
[drawImage-from-element-orientation-none.tentative.html]
expected: FAIL

View File

@ -0,0 +1,2 @@
[drawImage-from-element-swap-width-height-orientation-none.tentative.html]
expected: FAIL

View File

@ -1,4 +0,0 @@
[2d.pattern.image.broken.html]
[Canvas test: 2d.pattern.image.broken]
disabled: https://bugzilla.mozilla.org/show_bug.cgi?id=1016482

View File

@ -1,4 +0,0 @@
[2d.pattern.image.incomplete.emptysrc.html]
[Canvas test: 2d.pattern.image.incomplete.emptysrc]
expected: FAIL

View File

@ -1,4 +0,0 @@
[2d.pattern.image.incomplete.nosrc.html]
[Canvas test: 2d.pattern.image.incomplete.nosrc]
expected: FAIL

View File

@ -1,4 +0,0 @@
[2d.pattern.image.incomplete.removedsrc.html]
[Canvas test: 2d.pattern.image.incomplete.removedsrc]
expected: FAIL

View File

@ -0,0 +1,3 @@
[2d.path.isPointInStroke.scaleddashes.html]
[isPointInStroke() should return correct results on dashed paths at high scale factors]
expected: FAIL

View File

@ -0,0 +1,3 @@
[2d.imageData.get.large.crash.html]
[Test that canvas crash when image data cannot be allocated.]
expected: FAIL

View File

@ -0,0 +1,2 @@
leak-threshold: [default:51200]
prefs: [image.honor-orientation-metadata:true, layout.css.image-orientation.initial-from-image:true]

View File

@ -0,0 +1,2 @@
[image-orientation-background-image.html]
fuzzy: 2;40

View File

@ -0,0 +1,2 @@
[image-orientation-border-image.html]
fuzzy: 0-16;80-160

Some files were not shown because too many files have changed in this diff Show More