[opt-viewer] Put critical items in parallel

Summary:
Put opt-viewer critical items in parallel

Patch by Brian Cain!

Requires features from Python 2.7
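
The change fans the work out with the standard `multiprocessing.Pool` pattern. A minimal sketch of the idea, not the actual opt-viewer code, with hypothetical file names and Python 2.7 style to match the script:

```
# Parse each input file in a worker process, then combine the results in the
# parent. This mirrors the structure of the patch, not its exact code.
from multiprocessing import Pool, cpu_count

def parse_one(path):
    # Stand-in for the patch's get_remarks(): return some per-file result.
    with open(path) as f:
        return len(f.readlines())

if __name__ == '__main__':
    files = ['a.yaml', 'b.yaml']          # hypothetical inputs
    pool = Pool(processes=cpu_count())
    results = pool.map(parse_one, files)  # blocks until all workers finish
    print(sum(results))
```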

**Performance**
Below are performance results across various configurations. These were taken on an i5-5200U (dual core + HT) with a small subset of the YAML output of building Python 3.6.0b3 with LTO+PGO (60 YAML files).

"multiprocessing" is the current submission contents. "baseline" is as of 544f14c6b2a07a94168df31833dba9dc35fd8289 (I think this is aka r287505).

"ImportError" vs "class<...CLoader>" below are just confirming the expected configuration (with/without CLoader).

The results below were measured on an AMD A8-5500B (4 cores) with 224 input YAML files, showing a ~1.75x speedup over the baseline with libYAML. I suspect it would scale well on high-end servers.

```
**************************************** MULTIPROCESSING ****************************************
PyYAML:
        Traceback (most recent call last):
          File "<string>", line 1, in <module>
        ImportError: cannot import name CLoader
        Python 2.7.10
489.42user 5.53system 2:38.03elapsed 313%CPU (0avgtext+0avgdata 400308maxresident)k
0inputs+31392outputs (0major+473540minor)pagefaults 0swaps

PyYAML+libYAML:
        <class 'yaml.cyaml.CLoader'>
        Python 2.7.10
78.69user 5.45system 0:32.63elapsed 257%CPU (0avgtext+0avgdata 398560maxresident)k
0inputs+31392outputs (0major+542022minor)pagefaults 0swaps

PyPy/PyYAML:
        Traceback (most recent call last):
          File "<builtin>/app_main.py", line 75, in run_toplevel
          File "<builtin>/app_main.py", line 601, in run_it
          File "<string>", line 1, in <module>
        ImportError: cannot import name 'CLoader'
        Python 2.7.9 (2.6.0+dfsg-3, Jul 04 2015, 05:43:17)
        [PyPy 2.6.0 with GCC 4.9.3]
154.27user 8.12system 0:53.83elapsed 301%CPU (0avgtext+0avgdata 627960maxresident)k
808inputs+30376outputs (0major+727994minor)pagefaults 0swaps
**************************************** BASELINE        ****************************************
PyYAML:
        Traceback (most recent call last):
          File "<string>", line 1, in <module>
        ImportError: cannot import name CLoader
        Python 2.7.10
        358.08user 4.05system 6:08.37elapsed 98%CPU (0avgtext+0avgdata 315004maxresident)k
0inputs+31392outputs (0major+85252minor)pagefaults 0swaps

PyYAML+libYAML:
        <class 'yaml.cyaml.CLoader'>
        Python 2.7.10
50.32user 3.30system 0:56.59elapsed 94%CPU (0avgtext+0avgdata 307296maxresident)k
0inputs+31392outputs (0major+79335minor)pagefaults 0swaps

PyPy/PyYAML:
        Traceback (most recent call last):
          File "<builtin>/app_main.py", line 75, in run_toplevel
          File "<builtin>/app_main.py", line 601, in run_it
          File "<string>", line 1, in <module>
        ImportError: cannot import name 'CLoader'
        Python 2.7.9 (2.6.0+dfsg-3, Jul 04 2015, 05:43:17)
        [PyPy 2.6.0 with GCC 4.9.3]
72.94user 5.18system 1:23.41elapsed 93%CPU (0avgtext+0avgdata 455312maxresident)k
0inputs+30392outputs (0major+110280minor)pagefaults 0swaps

```
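
Each worker in the patch returns its own `(max_hotness, all_remarks, file_remarks)` tuple, and the parent process merges the nested per-file dictionaries afterwards. A simplified sketch of that merge step, modeled on the patch's `merge_dicts` (example data hypothetical, Python 2 idioms to match the script):

```
# Merge {file: {line: [remarks]}} dicts produced by separate workers so that
# remark lists for the same file/line are concatenated.
import functools
import itertools
from collections import defaultdict

def merge_dicts(dicts):
    merged = defaultdict(functools.partial(defaultdict, list))
    for file_key, lines in itertools.chain(*[d.iteritems() for d in dicts]):
        for line_key, remarks in lines.items():
            merged[file_key][line_key] += remarks
    return merged

print(merge_dicts([{'a.c': {3: ['r1']}}, {'a.c': {3: ['r2'], 7: ['r3']}}]))
# -> {'a.c': {3: ['r1', 'r2'], 7: ['r3']}} (as nested defaultdicts)
```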

Reviewers: fhahn, anemet

Reviewed By: anemet

Subscribers: llvm-commits, mehdi_amini

Differential Revision: https://reviews.llvm.org/D26967

llvm-svn: 293261
Adam Nemet 2017-01-27 06:38:31 +00:00
parent 0b79aa3373
commit 55bfb497d2
1 changed file with 126 additions and 41 deletions

@@ -15,6 +15,13 @@ try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
import functools
from collections import defaultdict
import itertools
from multiprocessing import Pool
from multiprocessing import Lock, cpu_count
import errno
import argparse
import os.path
import re
@@ -24,18 +31,14 @@ from pygments import highlight
from pygments.lexers.c_cpp import CppLexer
from pygments.formatters import HtmlFormatter
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('yaml_files', nargs='+')
parser.add_argument('output_dir')
parser.add_argument('-source-dir', '-s', default='', help='set source directory')
args = parser.parse_args()
p = subprocess.Popen(['c++filt', '-n'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p_lock = Lock()
def demangle(name):
p.stdin.write(name + '\n')
return p.stdout.readline().rstrip()
with p_lock:
p.stdin.write(name + '\n')
return p.stdout.readline().rstrip()
class Remark(yaml.YAMLObject):
@@ -156,16 +159,16 @@ class Missed(Remark):
class SourceFileRenderer:
def __init__(self, filename):
def __init__(self, source_dir, output_dir, filename):
existing_filename = None
if os.path.exists(filename):
existing_filename = filename
else:
fn = os.path.join(args.source_dir, filename)
fn = os.path.join(source_dir, filename)
if os.path.exists(fn):
existing_filename = fn
self.stream = open(os.path.join(args.output_dir, SourceFileRenderer.html_file_name(filename)), 'w')
self.stream = open(os.path.join(output_dir, SourceFileRenderer.html_file_name(filename)), 'w')
if existing_filename:
self.source_stream = open(existing_filename)
else:
@@ -243,8 +246,8 @@ class SourceFileRenderer:
class IndexRenderer:
def __init__(self):
self.stream = open(os.path.join(args.output_dir, 'index.html'), 'w')
def __init__(self, output_dir):
self.stream = open(os.path.join(output_dir, 'index.html'), 'w')
def render_entry(self, r):
print('''
@@ -278,41 +281,123 @@ class IndexRenderer:
</html>''', file=self.stream)
all_remarks = dict()
file_remarks = dict()
def get_remarks(input_file):
max_hotness = 0
all_remarks = dict()
file_remarks = defaultdict(functools.partial(defaultdict, list))
for input_file in args.yaml_files:
f = open(input_file)
docs = yaml.load_all(f, Loader=Loader)
for remark in docs:
# Avoid remarks withoug debug location or if they are duplicated
if not hasattr(remark, 'DebugLoc') or remark.key in all_remarks:
continue
all_remarks[remark.key] = remark
with open(input_file) as f:
docs = yaml.load_all(f, Loader=Loader)
file_remarks.setdefault(remark.File, dict()).setdefault(remark.Line, []).append(remark)
for remark in docs:
# Avoid remarks withoug debug location or if they are duplicated
if not hasattr(remark, 'DebugLoc') or remark.key in all_remarks:
continue
all_remarks[remark.key] = remark
Remark.max_hotness = max(Remark.max_hotness, remark.Hotness)
file_remarks[remark.File][remark.Line].append(remark)
# Set up a map between function names and their source location for function where inlining happened
for remark in all_remarks.itervalues():
if type(remark) == Passed and remark.Pass == "inline" and remark.Name == "Inlined":
for arg in remark.Args:
caller = arg.get('Caller')
if caller:
max_hotness = max(max_hotness, remark.Hotness)
return max_hotness, all_remarks, file_remarks
def _render_file(source_dir, output_dir, entry):
filename, remarks = entry
SourceFileRenderer(source_dir, output_dir, filename).render(remarks)
def gather_results(pool, filenames):
all_remarks = dict()
remarks = pool.map(get_remarks, filenames)
def merge_dicts(dicts):
''' Takes an iterable of dicts and merges them into
a single dict. Nested dicts are merged as well.
>>> merge_dicts([ {'a': [3], }, {'a': [4], }, {'b': [6] }])
{'a': [3,4,], 'b': [6]}
>>> merge_dicts([ {'a': {'q': [6,3], 'f': [30],}, }, {'a': {'f': [4,10]}, }, {'b': [6] }])
{'a': [{'q': [6,3]}, {'f': [4,10,30]}], 'b': [6]}
'''
merged = defaultdict(functools.partial(defaultdict, list))
for k, v in itertools.chain(*[d.iteritems() for d in dicts]):
for k_, v_ in v.items():
merged[k][k_] += v_
return merged
file_remark_dicts = [entry[2] for entry in remarks]
# merge the list of remarks at each line of each file
file_remarks = merge_dicts(file_remark_dicts)
# merge individual 'all_remark' results:
for _, all_rem, _ in remarks:
all_remarks.update(all_rem)
Remark.max_hotness = max(entry[0] for entry in remarks)
return all_remarks, file_remarks
def map_remarks(all_remarks):
# Set up a map between function names and their source location for
# function where inlining happened
for remark in all_remarks.itervalues():
if isinstance(remark, Passed) and remark.Pass == "inline" and remark.Name == "Inlined":
for arg in remark.Args:
caller = arg.get('Caller')
if caller:
Remark.caller_loc[caller] = arg['DebugLoc']
if Remark.should_display_hotness():
sorted_remarks = sorted(all_remarks.itervalues(), key=lambda r: r.Hotness, reverse=True)
else:
sorted_remarks = sorted(all_remarks.itervalues(), key=lambda r: (r.File, r.Line, r.Column))
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
def generate_report(pool, all_remarks, file_remarks, source_dir, output_dir):
try:
os.makedirs(output_dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(output_dir):
pass
else:
raise
for (filename, remarks) in file_remarks.iteritems():
SourceFileRenderer(filename).render(remarks)
_render_file_bound = functools.partial(_render_file, source_dir, output_dir)
pool.map(_render_file_bound, file_remarks.items())
IndexRenderer().render(sorted_remarks)
if Remark.should_display_hotness():
sorted_remarks = sorted(all_remarks.itervalues(), key=lambda r: r.Hotness, reverse=True)
else:
sorted_remarks = sorted(all_remarks.itervalues(), key=lambda r: (r.File, r.Line, r.Column))
IndexRenderer(args.output_dir).render(sorted_remarks)
shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)), "style.css"), args.output_dir)
shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"style.css"), output_dir)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('yaml_files', nargs='+')
    parser.add_argument('output_dir')
    parser.add_argument(
        '--jobs',
        '-j',
        default=cpu_count(),
        type=int,
        help='Max job count (defaults to current CPU count)')
    parser.add_argument(
        '-source-dir',
        '-s',
        default='',
        help='set source directory')
    args = parser.parse_args()
    if len(args.yaml_files) == 0:
        parser.print_help()
        sys.exit(1)
    pool = Pool(processes=args.jobs)
    all_remarks, file_remarks = gather_results(pool, args.yaml_files)
    map_remarks(all_remarks)
    generate_report(pool, all_remarks, file_remarks, args.source_dir, args.output_dir)