
This moves all the global variables into a separate module called `configuration`. This has a number of advantages:

1. Configuration data is centrally maintained, so it's easy to get a high-level overview of what configuration data the test suite makes use of.
2. The method of sharing configuration data among different parts of the test suite becomes standardized. Previously we would put some things into the `lldb` module, some things into the `lldbtest_config` module, and some things would not get shared at all. Now everything is shared through one module and is available to the entire test suite.
3. It opens the door to moving some of the initialization code into the `configuration` module, simplifying the implementation of `dotest.py`.

There are a few stragglers that didn't get converted over to using the `configuration` module in this patch, because doing so would have grown the patch unnecessarily. These include everything currently in the `lldbtest_config` module, as well as the `lldb.remote_platform` variable. We can address these in the future.

llvm-svn: 254982
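As a minimal sketch of the standardized access pattern: a test imports the `configuration` module and reads shared state directly, exactly as `setUp()` does in the file below with `bmIterationCount` (the fallback of 100 mirrors that test's own default):

    from lldbsuite.test import configuration

    # Shared settings live as module-level attributes on `configuration`;
    # read them directly and fall back to a default when unset.
    iterations = configuration.bmIterationCount
    if iterations <= 0:
        iterations = 100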
133 lines
4.3 KiB
Python
"""Test evaluating expressions repeatedly comparing lldb against gdb."""
|
|
|
|
from __future__ import print_function
|
|
|
|
|
|
|
|
import os, sys
|
|
import lldb
|
|
from lldbsuite.test import configuration
|
|
from lldbsuite.test.lldbbench import *
|
|
|
|
class RepeatedExprsCase(BenchBase):

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        BenchBase.setUp(self)
        self.source = 'main.cpp'
        self.line_to_break = line_number(self.source, '// Set breakpoint here.')
        self.lldb_avg = None
        self.gdb_avg = None
        self.count = configuration.bmIterationCount
        if self.count <= 0:
            self.count = 100

    @benchmarks_test
    @expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
    def test_compare_lldb_to_gdb(self):
        """Test repeated expressions with lldb vs. gdb."""
        self.build()
        self.exe_name = 'a.out'

        print()
        self.run_lldb_repeated_exprs(self.exe_name, self.count)
        print("lldb benchmark:", self.stopwatch)
        self.run_gdb_repeated_exprs(self.exe_name, self.count)
        print("gdb benchmark:", self.stopwatch)
        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))

    def run_lldb_repeated_exprs(self, exe_name, count):
        import pexpect
        exe = os.path.join(os.getcwd(), exe_name)

        # Set self.child_prompt, which is "(lldb) ".
        self.child_prompt = '(lldb) '
        prompt = self.child_prompt

        # So that the child gets torn down after the test.
        self.child = pexpect.spawn(
            '%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
        child = self.child

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            child.logfile_read = sys.stdout

        child.expect_exact(prompt)
        child.sendline(
            'breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
        child.expect_exact(prompt)
        child.sendline('run')
        child.expect_exact(prompt)
        expr_cmd1 = 'expr ptr[j]->point.x'
        expr_cmd2 = 'expr ptr[j]->point.y'

        # Reset the stopwatch now.
        self.stopwatch.reset()
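        # Time only the two expression evaluations; the 'process continue'
        # that resumes the inferior runs outside the timed block.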
        for i in range(count):
            with self.stopwatch:
                child.sendline(expr_cmd1)
                child.expect_exact(prompt)
                child.sendline(expr_cmd2)
                child.expect_exact(prompt)
            child.sendline('process continue')
            child.expect_exact(prompt)

        child.sendline('quit')
        try:
            self.child.expect(pexpect.EOF)
        except:
            pass

        self.lldb_avg = self.stopwatch.avg()
        if self.TraceOn():
            print("lldb expression benchmark:", str(self.stopwatch))
        self.child = None

    def run_gdb_repeated_exprs(self, exe_name, count):
        import pexpect
        exe = os.path.join(os.getcwd(), exe_name)

        # Set self.child_prompt, which is "(gdb) ".
        self.child_prompt = '(gdb) '
        prompt = self.child_prompt

        # So that the child gets torn down after the test.
        self.child = pexpect.spawn('gdb --nx %s' % exe)
        child = self.child

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            child.logfile_read = sys.stdout

        child.expect_exact(prompt)
        child.sendline('break %s:%d' % (self.source, self.line_to_break))
        child.expect_exact(prompt)
        child.sendline('run')
        child.expect_exact(prompt)
        expr_cmd1 = 'print ptr[j]->point.x'
        expr_cmd2 = 'print ptr[j]->point.y'

        # Reset the stopwatch now.
        self.stopwatch.reset()
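        # As with the lldb run, only the two 'print' commands are timed;
        # 'continue' runs outside the timed block.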
        for i in range(count):
            with self.stopwatch:
                child.sendline(expr_cmd1)
                child.expect_exact(prompt)
                child.sendline(expr_cmd2)
                child.expect_exact(prompt)
            child.sendline('continue')
            child.expect_exact(prompt)

        child.sendline('quit')
        child.expect_exact('The program is running. Exit anyway?')
        child.sendline('y')
        try:
            self.child.expect(pexpect.EOF)
        except:
            pass

        self.gdb_avg = self.stopwatch.avg()
        if self.TraceOn():
            print("gdb expression benchmark:", str(self.stopwatch))
        self.child = None