Add the real benchmarks comparing lldb against gdb for repeated expression evaluations.

Modify lldbbench.py so that the lldbtest.line_number() utility function is available to
BenchBase clients as just line_number(), and modify lldbtest.py so that self.lldbExec
(the full path of the 'lldb' executable) is available to BenchBase clients as well.

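With these two changes, a benchmark test deriving from BenchBase can call line_number()
directly and spawn the debugger via self.lldbExec. A minimal sketch of such a client
(Python 2 to match the suite; the class name, test name, and breakpoint comment below
are hypothetical, and `from lldbbench import *` re-exporting these names is assumed):

    import os
    import pexpect
    from lldbbench import *   # assumed to bring in BenchBase, benchmarks_test, line_number

    class MyExprBenchCase(BenchBase):

        mydir = os.path.join("benchmarks", "example")

        @benchmarks_test
        def test_my_bench(self):
            """A hypothetical benchmark using the newly exposed helpers."""
            self.buildDefault()
            # line_number() is re-exported by lldbbench from lldbtest.
            line = line_number('main.cpp', '// Set breakpoint here.')
            # self.lldbExec is populated from $LLDB_EXEC in Base.setUp().
            self.child = pexpect.spawn('%s %s' % (self.lldbExec,
                                                  os.path.join(os.getcwd(), 'a.out')))
            self.child.expect_exact('(lldb) ')
            self.child.sendline('breakpoint set -f main.cpp -l %d' % line)
            self.child.expect_exact('(lldb) ')
            self.child.sendline('quit')
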
An example run of the test case on my MacBook Pro running Lion:

1: test_compare_lldb_to_gdb (TestRepeatedExprs.RepeatedExprsCase)
   Test repeated expressions with lldb vs. gdb. ... 
lldb_avg: 0.204339
gdb_avg: 0.205721
lldb_avg/gdb_avg: 0.993284
ok

llvm-svn: 136740
Johnny Chen 2011-08-02 22:54:37 +00:00
parent 43859a6ad2
commit aaa82ff9ad
4 changed files with 117 additions and 26 deletions

benchmarks/example/TestRepeatedExprs.py

@@ -1,6 +1,6 @@
 """Test evaluating expressions repeatedly comparing lldb against gdb."""

-import os
+import os, sys
 import unittest2
 import lldb
 import pexpect
@@ -10,28 +10,118 @@ class RepeatedExprsCase(BenchBase):

     mydir = os.path.join("benchmarks", "example")

+    def setUp(self):
+        BenchBase.setUp(self)
+        self.source = 'main.cpp'
+        self.line_to_break = line_number(self.source, '// Set breakpoint here.')
+        self.lldb_avg = None
+        self.gdb_avg = None
+
     @benchmarks_test
-    def test_with_lldb(self):
-        """Test repeated expressions with lldb."""
+    def test_compare_lldb_to_gdb(self):
+        """Test repeated expressions with lldb vs. gdb."""
         self.buildDefault()
-        self.run_lldb_repeated_exprs()
-
-    @benchmarks_test
-    def test_with_gdb(self):
-        """Test repeated expressions with gdb."""
-        self.buildDefault()
-        self.run_gdb_repeated_exprs()
-
-    def run_lldb_repeated_exprs(self):
-        for i in range(1000):
+        self.exe_name = 'a.out'
+        print
+        self.run_lldb_repeated_exprs(self.exe_name, 100)
+        self.run_gdb_repeated_exprs(self.exe_name, 100)
+        print "lldb_avg: %f" % self.lldb_avg
+        print "gdb_avg: %f" % self.gdb_avg
+        print "lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg)
+
+    def run_lldb_repeated_exprs(self, exe_name, count):
+        exe = os.path.join(os.getcwd(), exe_name)
+        # Set self.child_prompt, which is "(lldb) ".
+        self.child_prompt = '(lldb) '
+        prompt = self.child_prompt
+
+        # So that the child gets torn down after the test.
+        self.child = pexpect.spawn('%s %s' % (self.lldbExec, exe))
+        child = self.child
+
+        # Turn on logging for what the child sends back.
+        if self.TraceOn():
+            child.logfile_read = sys.stdout
+
+        child.expect_exact(prompt)
+        child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
+        child.expect_exact(prompt)
+        child.sendline('run')
+        child.expect_exact(prompt)
+        expr_cmd1 = 'expr ptr[j]->point.x'
+        expr_cmd2 = 'expr ptr[j]->point.y'
+
+        # Reset the stopwatch now.
+        self.stopwatch.reset()
+        for i in range(count):
             with self.stopwatch:
-                print "running "+self.testMethodName
-        print "benchmarks result for "+self.testMethodName
-        print "stopwatch:", str(self.stopwatch)
+                child.sendline(expr_cmd1)
+                child.expect_exact(prompt)
+                child.sendline(expr_cmd2)
+                child.expect_exact(prompt)
+            child.sendline('process continue')
+            child.expect_exact(prompt)
+
+        child.sendline('quit')
+        try:
+            self.child.expect(pexpect.EOF)
+        except:
+            pass
+
+        self.lldb_avg = self.stopwatch.avg()
+        if self.TraceOn():
+            print "lldb expression benchmark:", str(self.stopwatch)
+        self.child = None

-    def run_gdb_repeated_exprs(self):
-        print "running "+self.testMethodName
-        print "benchmarks result for "+self.testMethodName
+    def run_gdb_repeated_exprs(self, exe_name, count):
+        exe = os.path.join(os.getcwd(), exe_name)
+        # Set self.child_prompt, which is "(gdb) ".
+        self.child_prompt = '(gdb) '
+        prompt = self.child_prompt
+
+        # So that the child gets torn down after the test.
+        self.child = pexpect.spawn('gdb %s' % exe)
+        child = self.child
+
+        # Turn on logging for what the child sends back.
+        if self.TraceOn():
+            child.logfile_read = sys.stdout
+
+        child.expect_exact(prompt)
+        child.sendline('break %s:%d' % (self.source, self.line_to_break))
+        child.expect_exact(prompt)
+        child.sendline('run')
+        child.expect_exact(prompt)
+        expr_cmd1 = 'print ptr[j]->point.x'
+        expr_cmd2 = 'print ptr[j]->point.y'
+
+        # Reset the stopwatch now.
+        self.stopwatch.reset()
+        for i in range(count):
+            with self.stopwatch:
+                child.sendline(expr_cmd1)
+                child.expect_exact(prompt)
+                child.sendline(expr_cmd2)
+                child.expect_exact(prompt)
+            child.sendline('continue')
+            child.expect_exact(prompt)
+
+        child.sendline('quit')
+        child.expect_exact('The program is running. Exit anyway?')
+        child.sendline('y')
+        try:
+            self.child.expect(pexpect.EOF)
+        except:
+            pass
+
+        self.gdb_avg = self.stopwatch.avg()
+        if self.TraceOn():
+            print "gdb expression benchmark:", str(self.stopwatch)
+        self.child = None

 if __name__ == '__main__':
     import atexit
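
Note how the timing is scoped in both drivers: only the two expression commands run
inside the `with self.stopwatch:` block, so each lap measures expression evaluation
alone, while the `process continue`/`continue` between laps stays off the clock. A
stripped-down sketch of that pattern (the lldb path and breakpoint line are
hypothetical):

    import sys
    import pexpect
    from lldbbench import Stopwatch

    stopwatch = Stopwatch()
    child = pexpect.spawn('/usr/local/bin/lldb a.out')   # hypothetical lldb path
    child.logfile_read = sys.stdout                      # echo what lldb prints
    child.expect_exact('(lldb) ')
    child.sendline('breakpoint set -f main.cpp -l 33')   # hypothetical line number
    child.expect_exact('(lldb) ')
    child.sendline('run')
    child.expect_exact('(lldb) ')

    stopwatch.reset()
    for i in range(100):
        with stopwatch:                        # timed: the expression evaluation only
            child.sendline('expr ptr[j]->point.x')
            child.expect_exact('(lldb) ')
        child.sendline('process continue')     # untimed: getting to the next lap
        child.expect_exact('(lldb) ')
    child.sendline('quit')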

benchmarks/example/main.cpp

@@ -30,13 +30,13 @@ int main(int argc, char const *argv[]) {
     }
     printf("Finished populating data.\n");
-    for (int i = 0; i < 1000; ++i) {
+    for (int j = 0; j < 1000; ++j) {
         bool dump = argc > 1; // Set breakpoint here.
         // Evaluate a couple of expressions (2*1000 = 2000 exprs):
-        // expr ptr[i]->point.x
-        // expr ptr[i]->point.y
+        // expr ptr[j]->point.x
+        // expr ptr[j]->point.y
         if (dump) {
-            printf("data[%d] = %d (%d, %d)\n", i, ptr[i]->id, ptr[i]->point.x, ptr[i]->point.y);
+            printf("data[%d] = %d (%d, %d)\n", j, ptr[j]->id, ptr[j]->point.x, ptr[j]->point.y);
         }
     }
     return 0;
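
The loop variable is renamed from i to j, keeping the source in sync with the
expressions the benchmark types at the two debugger prompts:

    (lldb) expr ptr[j]->point.x
    (gdb) print ptr[j]->point.y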

lldbbench.py

@@ -1,6 +1,7 @@
 import time
-from lldbtest import benchmarks_test
 from lldbtest import Base
+from lldbtest import benchmarks_test
+from lldbtest import line_number

 class Stopwatch(object):
     """Stopwatch provides a simple utility to start/stop your stopwatch multiple
@@ -80,7 +81,7 @@ class Stopwatch(object):
         return self.__total_elapsed__ / self.__laps__

     def __str__(self):
-        return "Avg: %f (Laps: %d, Total Elapsed Time: %d)" % (self.avg(),
+        return "Avg: %f (Laps: %d, Total Elapsed Time: %f)" % (self.avg(),
                                                                self.__laps__,
                                                                self.__total_elapsed__)
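
The %d-to-%f fix in __str__ matters because sub-second totals would previously print
as 0. A quick usage sketch of Stopwatch, assuming lldbbench is importable and relying
on the with-statement support the test above already exercises:

    import time
    from lldbbench import Stopwatch

    sw = Stopwatch()
    sw.reset()
    for i in range(3):
        with sw:              # each with-block is counted as one timed lap
            time.sleep(0.01)
    print "average lap: %f" % sw.avg()
    print str(sw)             # e.g. "Avg: 0.010xxx (Laps: 3, Total Elapsed Time: 0.030xxx)"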

lldbtest.py

@@ -451,6 +451,9 @@ class Base(unittest2.TestCase):
         #import traceback
         #traceback.print_stack()

+        if "LLDB_EXEC" in os.environ:
+            self.lldbExec = os.environ["LLDB_EXEC"]
+
         # Assign the test method name to self.testMethodName.
         #
         # For an example of the use of this attribute, look at test/types dir.
@@ -837,9 +840,6 @@ class TestBase(Base):
         # Works with the test driver to conditionally skip tests via decorators.
         Base.setUp(self)

-        if "LLDB_EXEC" in os.environ:
-            self.lldbExec = os.environ["LLDB_EXEC"]
-
         try:
             if lldb.blacklist:
                 className = self.__class__.__name__
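
The net effect of these two hunks is to hoist the LLDB_EXEC lookup out of
TestBase.setUp() into Base.setUp(), so every Base-derived test gets self.lldbExec.
Schematically (a simplified sketch, not the real class bodies; BenchBase deriving
from Base is implied by lldbbench's `from lldbtest import Base`):

    import os
    import unittest2

    class Base(unittest2.TestCase):
        def setUp(self):
            # Hoisted here from TestBase.setUp() by this commit.
            if "LLDB_EXEC" in os.environ:
                self.lldbExec = os.environ["LLDB_EXEC"]

    class TestBase(Base):          # regular lldb tests: behavior unchanged
        def setUp(self):
            Base.setUp(self)

    class BenchBase(Base):         # benchmarks now inherit self.lldbExec too
        def setUp(self):
            Base.setUp(self)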