Support profiling nested hierarchical mtasks with verilator_gantt (#5956)

Bartłomiej Chmiel 2025-04-24 13:50:07 +02:00, committed by GitHub
parent 6ac2aa2d99
commit d6e52b9400
9 changed files with 158 additions and 115 deletions

View File

@@ -37,8 +37,10 @@ def read_data(filename):
     with open(filename, "r", encoding="utf8") as fh:
         re_thread = re.compile(r'^VLPROFTHREAD (\d+)$')
         re_record = re.compile(r'^VLPROFEXEC (\S+) (\d+)(.*)$')
-        re_payload_mtaskBegin = re.compile(r'id (\d+) predictStart (\d+) cpu (\d+)')
-        re_payload_mtaskEnd = re.compile(r'id (\d+) predictCost (\d+)')
+        # The hierBlock argument is optional
+        re_payload_mtaskBegin = re.compile(
+            r'id (\d+) predictStart (\d+) cpu (\d+)(?: hierBlock)?\s*(\w+)?')
+        re_payload_mtaskEnd = re.compile(r'predictCost (\d+)')
         re_payload_wait = re.compile(r'cpu (\d+)')
         re_arg1 = re.compile(r'VLPROF arg\s+(\S+)\+([0-9.]*)\s*')
@@ -56,8 +58,8 @@ def read_data(filename):
         ExecGraphStack = []
         SectionStack = []
+        MtasksStack = []
         ThreadScheduleWait = collections.defaultdict(list)
-        mTaskThread = {}

         for line in fh:
             recordMatch = re_record.match(line)
@@ -74,33 +76,37 @@ def read_data(filename):
                     SectionStack.pop()
                     Sections[thread].append((tick, tuple(SectionStack)))
                 elif kind == "MTASK_BEGIN":
-                    mtask, predict_start, ecpu = re_payload_mtaskBegin.match(payload).groups()
+                    mtask, predict_start, ecpu, hier_block = re_payload_mtaskBegin.match(
+                        payload).groups()
+                    hier_block = "" if hier_block is None else hier_block
                     mtask = int(mtask)
                     predict_start = int(predict_start)
                     ecpu = int(ecpu)
-                    mTaskThread[mtask] = thread
                     records = Threads[thread]
-                    assert not records or records[-1]['start'] <= records[-1]['end'] <= tick
                     records.append({
                         'start': tick,
                         'mtask': mtask,
                         'predict_start': predict_start,
+                        'hier_block': hier_block,
                         'cpu': ecpu
                     })
-                    Mtasks[mtask]['begin'] = tick
-                    Mtasks[mtask]['thread'] = thread
-                    Mtasks[mtask]['predict_start'] = predict_start
+                    Mtasks[(hier_block, mtask)]['begin'] = tick
+                    Mtasks[(hier_block, mtask)]['predict_start'] = predict_start
+                    Mtasks[(hier_block, mtask)]['thread'] = thread
+                    MtasksStack.append((hier_block, mtask, records[-1]))
                 elif kind == "MTASK_END":
-                    mtask, predict_cost = re_payload_mtaskEnd.match(payload).groups()
-                    mtask = int(mtask)
+                    predict_cost, = re_payload_mtaskEnd.match(payload).groups()
+                    hier_block, mtask, record = MtasksStack.pop()
                     predict_cost = int(predict_cost)
-                    begin = Mtasks[mtask]['begin']
-                    record = Threads[mTaskThread[mtask]][-1]
+                    begin = Mtasks[(hier_block, mtask)]['begin']
                     record['end'] = tick
+                    assert record and records[-1]['start'] <= records[-1]['end'] <= tick
                     record['predict_cost'] = predict_cost
-                    Mtasks[mtask]['elapsed'] += tick - begin
-                    Mtasks[mtask]['predict_cost'] = predict_cost
-                    Mtasks[mtask]['end'] = max(Mtasks[mtask]['end'], tick)
+                    Mtasks[(hier_block, mtask)]['elapsed'] += tick - begin
+                    Mtasks[(hier_block, mtask)]['predict_cost'] = predict_cost
+                    Mtasks[(hier_block, mtask)]['end'] = max(Mtasks[(hier_block, mtask)]['end'],
+                                                             tick)
                 elif kind == "THREAD_SCHEDULE_WAIT_BEGIN":
                     ecpu = int(re_payload_wait.match(payload).groups()[0])
                     ThreadScheduleWait[ecpu].append(tick)
@@ -234,8 +240,8 @@ def report_mtasks():
     long_mtask = None
     predict_mtask_time = 0
     predict_elapsed = 0
-    for mtaskId in Mtasks:
-        record = Mtasks[mtaskId]
+    for (hier_block, mtask_id) in Mtasks:
+        record = Mtasks[(hier_block, mtask_id)]
         predict_mtask_time += record['predict_cost']
         total_mtask_time += record['elapsed']
         thread_mtask_time[record['thread']] += record['elapsed']
@@ -243,7 +249,8 @@ def report_mtasks():
         predict_elapsed = max(predict_elapsed, predict_end)
         if record['elapsed'] > long_mtask_time:
             long_mtask_time = record['elapsed']
-            long_mtask = mtaskId
+            long_mtask = mtask_id
+            long_mtask_hier_block = hier_block

     Global['predict_last_end'] = predict_elapsed
     serialTime = ElapsedTime - ExecGraphTime
@@ -272,31 +279,52 @@ def report_mtasks():
     max_p2e = -1000000
     max_mtask = None
-    for mtask in sorted(Mtasks.keys()):
-        if Mtasks[mtask]['elapsed'] > 0:
-            if Mtasks[mtask]['predict_cost'] == 0:
-                Mtasks[mtask]['predict_cost'] = 1  # don't log(0) below
-            p2e_ratio = math.log(Mtasks[mtask]['predict_cost'] / Mtasks[mtask]['elapsed'])
+    for (hier_block, mtask_id) in sorted(Mtasks.keys()):
+        mtask = Mtasks[(hier_block, mtask_id)]
+        if mtask['elapsed'] > 0:
+            if mtask['predict_cost'] == 0:
+                mtask['predict_cost'] = 1  # don't log(0) below
+            p2e_ratio = math.log(mtask['predict_cost'] / mtask['elapsed'])
             p2e_ratios.append(p2e_ratio)
             if p2e_ratio > max_p2e:
                 max_p2e = p2e_ratio
-                max_mtask = mtask
+                max_mtask = mtask_id
+                max_hier_block = hier_block
             if p2e_ratio < min_p2e:
                 min_p2e = p2e_ratio
-                min_mtask = mtask
+                min_mtask = mtask_id
+                min_hier_block = hier_block

     print("\nMTask statistics:")
-    print("  Longest mtask id = {}".format(long_mtask))
+    if long_mtask_hier_block:
+        print("  Longest mtask id = {} from hier_block '{}'".format(long_mtask,
+                                                                    long_mtask_hier_block))
+    else:
+        print("  Longest mtask id = {}".format(long_mtask))
     print("  Longest mtask time = {:.2%} of time elapsed in parallelized code".format(
         long_mtask_time / ExecGraphTime))
     print("  min log(p2e) = %0.3f" % min_p2e, end="")
-    print(" from mtask %d (predict %d," % (min_mtask, Mtasks[min_mtask]['predict_cost']), end="")
-    print(" elapsed %d)" % Mtasks[min_mtask]['elapsed'])
+    if min_hier_block:
+        print(" from hier_block '%s' mtask %d (predict %d," %
+              (min_hier_block, min_mtask, Mtasks[(min_hier_block, min_mtask)]['predict_cost']),
+              end="")
+    else:
+        print(" from mtask %d (predict %d," %
+              (min_mtask, Mtasks[(min_hier_block, min_mtask)]['predict_cost']),
+              end="")
+    print(" elapsed %d)" % Mtasks[(min_hier_block, min_mtask)]['elapsed'])
     print("  max log(p2e) = %0.3f" % max_p2e, end="")
-    print(" from mtask %d (predict %d," % (max_mtask, Mtasks[max_mtask]['predict_cost']), end="")
-    print(" elapsed %d)" % Mtasks[max_mtask]['elapsed'])
+    if max_hier_block:
+        print(" from hier_block '%s' mtask %d (predict %d," %
+              (max_hier_block, max_mtask, Mtasks[(max_hier_block, max_mtask)]['predict_cost']),
+              end="")
+    else:
+        print(" from mtask %d (predict %d," %
+              (max_mtask, Mtasks[(max_hier_block, max_mtask)]['predict_cost']),
+              end="")
+    print(" elapsed %d)" % Mtasks[(max_hier_block, max_mtask)]['elapsed'])

     stddev = statistics.pstdev(p2e_ratios)
     mean = statistics.mean(p2e_ratios)
@@ -482,17 +510,17 @@ def write_vcd(filename):
         # Compute scale so predicted graph is of same width as interval
         measured_scaling = (end - start) / Global['predict_last_end']
         # Predict mtasks that fill the time the execution occupied
-        for mtask in Mtasks:
-            thread = Mtasks[mtask]['thread']
-            pred_scaled_start = start + int(Mtasks[mtask]['predict_start'] * measured_scaling)
+        for (hier_block, mtask_id) in Mtasks:
+            mtask = Mtasks[(hier_block, mtask_id)]
+            thread = mtask['thread']
+            pred_scaled_start = start + int(mtask['predict_start'] * measured_scaling)
             pred_scaled_end = start + int(
-                (Mtasks[mtask]['predict_start'] + Mtasks[mtask]['predict_cost']) *
-                measured_scaling)
+                (mtask['predict_start'] + mtask['predict_cost']) * measured_scaling)
             if pred_scaled_start == pred_scaled_end:
                 continue
             mcode = getCode(32, 'predicted', 't%d_mtask' % thread)
-            addValue(mcode, pred_scaled_start, mtask)
+            addValue(mcode, pred_scaled_start, mtask_id)
             addValue(mcode, pred_scaled_end, None)
             parallelism['predicted'][pred_scaled_start] += 1
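
Note: in the new re_payload_mtaskBegin pattern above, the fourth capture group is None when the optional hierBlock suffix is absent, which is why read_data() normalizes it to "". A minimal standalone check of both payload forms (illustration only, not part of the commit):

    import re

    re_payload_mtaskBegin = re.compile(
        r'id (\d+) predictStart (\d+) cpu (\d+)(?: hierBlock)?\s*(\w+)?')
    # Plain payload: the optional hierBlock group stays None
    assert re_payload_mtaskBegin.match(
        "id 6 predictStart 0 cpu 19").groups() == ('6', '0', '19', None)
    # Hierarchical payload: the block name is captured
    assert re_payload_mtaskBegin.match(
        "id 10 predictStart 196 cpu 19 hierBlock sub").groups() == ('10', '196', '19', 'sub')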

View File

@@ -190,13 +190,18 @@ void VlExecutionProfiler::dump(const char* filenamep, uint64_t tickEnd)
             break;
         case VlExecutionRecord::Type::MTASK_BEGIN: {
             const auto& payload = er.m_payload.mtaskBegin;
-            fprintf(fp, " id %u predictStart %u cpu %u\n", payload.m_id,
-                    payload.m_predictStart, payload.m_cpu);
+            if (payload.m_hierBlock[0] != '\0') {
+                fprintf(fp, " id %u predictStart %u cpu %u hierBlock %s\n", payload.m_id,
+                        payload.m_predictStart, payload.m_cpu, payload.m_hierBlock);
+            } else {
+                fprintf(fp, " id %u predictStart %u cpu %u\n", payload.m_id,
+                        payload.m_predictStart, payload.m_cpu);
+            }
             break;
         }
         case VlExecutionRecord::Type::MTASK_END: {
             const auto& payload = er.m_payload.mtaskEnd;
-            fprintf(fp, " id %u predictCost %u\n", payload.m_id, payload.m_predictCost);
+            fprintf(fp, " predictCost %u\n", payload.m_predictCost);
             break;
         }
         case VlExecutionRecord::Type::THREAD_SCHEDULE_WAIT_BEGIN:
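
Note: because MTASK_END records no longer carry an mtask id, a reader of the dump must pair each MTASK_END with the most recent unmatched MTASK_BEGIN on the same thread, which is exactly what the MtasksStack in verilator_gantt above does. A minimal sketch of that LIFO pairing (pair_mtasks is a hypothetical helper, not part of the commit):

    # Pair BEGIN/END events on one thread; nesting makes this a stack discipline.
    def pair_mtasks(events):
        # events: ('BEGIN', tick, mtask_id) or ('END', tick, None)
        stack, spans = [], []
        for kind, tick, mtask_id in events:
            if kind == 'BEGIN':
                stack.append((mtask_id, tick))
            else:  # END closes the innermost open mtask
                begun_id, begin_tick = stack.pop()
                spans.append((begun_id, begin_tick, tick))
        return spans

    # Thread 0 of the nested trace below: mtask 6 encloses hier-block mtask 10
    assert pair_mtasks([('BEGIN', 2695, 6), ('BEGIN', 3795, 10),
                        ('END', 4850, None), ('END', 5905, None)]) \
        == [(10, 3795, 4850), (6, 2695, 5905)]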

View File

@@ -92,9 +92,9 @@ class VlExecutionRecord final {
             uint32_t m_id;  // MTask id
             uint32_t m_predictStart;  // Time scheduler predicted would start
             uint32_t m_cpu;  // Executing CPU id
+            const char* m_hierBlock;  // Name of a hier block with this mtask
         } mtaskBegin;
         struct {
-            uint32_t m_id;  // MTask id
             uint32_t m_predictCost;  // How long scheduler predicted would take
         } mtaskEnd;
         struct {
@@ -120,14 +120,14 @@ public:
         m_type = Type::SECTION_PUSH;
     }
     void sectionPop() { m_type = Type::SECTION_POP; }
-    void mtaskBegin(uint32_t id, uint32_t predictStart) {
+    void mtaskBegin(uint32_t id, uint32_t predictStart, const char* hierBlock = "") {
         m_payload.mtaskBegin.m_id = id;
         m_payload.mtaskBegin.m_predictStart = predictStart;
         m_payload.mtaskBegin.m_cpu = VlOs::getcpu();
+        m_payload.mtaskBegin.m_hierBlock = hierBlock;
         m_type = Type::MTASK_BEGIN;
     }
-    void mtaskEnd(uint32_t id, uint32_t predictCost) {
-        m_payload.mtaskEnd.m_id = id;
+    void mtaskEnd(uint32_t predictCost) {
         m_payload.mtaskEnd.m_predictCost = predictCost;
         m_type = Type::MTASK_END;
     }

View File

@@ -916,18 +916,17 @@ void wrapMTaskBodies(AstExecGraph* const execGraphp) {
         funcp->addStmtsp(new AstCStmt{flp, stmt});
     };
-    if (v3Global.opt.hierChild() || !v3Global.opt.hierBlocks().empty()) {
-        addStrStmt(
-            "static const unsigned taskId = vlSymsp->__Vm_threadPoolp->assignTaskIndex();\n");
-    } else {
-        const string& id = std::to_string(mtaskp->id());
-        addStrStmt("static constexpr unsigned taskId = " + id + ";\n");
-    }
-    if (v3Global.opt.profExec() && mtaskp->threads() <= 1) {
+    addStrStmt("static constexpr unsigned taskId = " + cvtToStr(mtaskp->id()) + ";\n");
+    if (v3Global.opt.profExec()) {
         const string& predictStart = std::to_string(mtaskp->predictStart());
-        addStrStmt("VL_EXEC_TRACE_ADD_RECORD(vlSymsp).mtaskBegin(taskId, " + predictStart
-                   + ");\n");
+        if (v3Global.opt.hierChild()) {
+            addStrStmt("VL_EXEC_TRACE_ADD_RECORD(vlSymsp).mtaskBegin(taskId, " + predictStart
+                       + ", \"" + v3Global.opt.topModule() + "\");\n");
+        } else {
+            addStrStmt("VL_EXEC_TRACE_ADD_RECORD(vlSymsp).mtaskBegin(taskId, " + predictStart
+                       + ");\n");
+        }
     }
     // Set mtask ID in the run-time system
@@ -939,10 +938,9 @@ void wrapMTaskBodies(AstExecGraph* const execGraphp) {
     // Flush message queue
     addStrStmt("Verilated::endOfThreadMTask(vlSymsp->__Vm_evalMsgQp);\n");
-    if (v3Global.opt.profExec() && mtaskp->threads() <= 1) {
-        const string& predictConst = std::to_string(mtaskp->cost());
-        addStrStmt("VL_EXEC_TRACE_ADD_RECORD(vlSymsp).mtaskEnd(taskId, " + predictConst
-                   + ");\n");
+    if (v3Global.opt.profExec()) {
+        const string& predictCost = std::to_string(mtaskp->cost());
+        addStrStmt("VL_EXEC_TRACE_ADD_RECORD(vlSymsp).mtaskEnd(" + predictCost + ");\n");
     }
     // AstMTask will simply contain a call

View File

@@ -903,39 +903,41 @@ VLPROFPROC
 VLPROFTHREAD 0
 VLPROFEXEC EXEC_GRAPH_BEGIN 945
 VLPROFEXEC MTASK_BEGIN 2695 id 6 predictStart 0 cpu 19
-VLPROFEXEC MTASK_END 2905 id 6 predictCost 30
+VLPROFEXEC MTASK_BEGIN 3795 id 10 predictStart 196 cpu 19 hierBlock sub
+VLPROFEXEC MTASK_END 4850 predictCost 30
+VLPROFEXEC MTASK_END 5905 predictCost 30
 VLPROFEXEC MTASK_BEGIN 9695 id 10 predictStart 196 cpu 19
-VLPROFEXEC MTASK_END 9870 id 10 predictCost 30
+VLPROFEXEC MTASK_END 9870 predictCost 30
 VLPROFEXEC EXEC_GRAPH_END 12180
 VLPROFEXEC EXEC_GRAPH_BEGIN 14000
 VLPROFEXEC MTASK_BEGIN 15610 id 6 predictStart 0 cpu 19
-VLPROFEXEC MTASK_END 15820 id 6 predictCost 30
+VLPROFEXEC MTASK_END 15820 predictCost 30
 VLPROFEXEC THREAD_SCHEDULE_WAIT_BEGIN 20000 cpu 19
 VLPROFEXEC THREAD_SCHEDULE_WAIT_END 21000 cpu 19
 VLPROFEXEC MTASK_BEGIN 21700 id 10 predictStart 196 cpu 19
-VLPROFEXEC MTASK_END 21875 id 10 predictCost 30
+VLPROFEXEC MTASK_END 21875 predictCost 30
 VLPROFEXEC EXEC_GRAPH_END 22085
 VLPROFTHREAD 1
 VLPROFEXEC MTASK_BEGIN 5495 id 5 predictStart 0 cpu 10
-VLPROFEXEC MTASK_END 6090 id 5 predictCost 30
+VLPROFEXEC MTASK_END 6090 predictCost 30
 VLPROFEXEC MTASK_BEGIN 6300 id 7 predictStart 30 cpu 10
-VLPROFEXEC MTASK_END 6895 id 7 predictCost 30
+VLPROFEXEC MTASK_END 6895 predictCost 30
 VLPROFEXEC MTASK_BEGIN 7490 id 8 predictStart 60 cpu 10
-VLPROFEXEC MTASK_END 8540 id 8 predictCost 107
+VLPROFEXEC MTASK_END 8540 predictCost 107
 VLPROFEXEC MTASK_BEGIN 9135 id 9 predictStart 167 cpu 10
-VLPROFEXEC MTASK_END 9730 id 9 predictCost 30
+VLPROFEXEC MTASK_END 9730 predictCost 30
 VLPROFEXEC MTASK_BEGIN 10255 id 11 predictStart 197 cpu 10
-VLPROFEXEC MTASK_END 11060 id 11 predictCost 30
+VLPROFEXEC MTASK_END 11060 predictCost 30
 VLPROFEXEC MTASK_BEGIN 18375 id 5 predictStart 0 cpu 10
-VLPROFEXEC MTASK_END 18970 id 5 predictCost 30
+VLPROFEXEC MTASK_END 18970 predictCost 30
 VLPROFEXEC MTASK_BEGIN 19145 id 7 predictStart 30 cpu 10
-VLPROFEXEC MTASK_END 19320 id 7 predictCost 30
+VLPROFEXEC MTASK_END 19320 predictCost 30
 VLPROFEXEC MTASK_BEGIN 19670 id 8 predictStart 60 cpu 10
-VLPROFEXEC MTASK_END 19810 id 8 predictCost 107
+VLPROFEXEC MTASK_END 19810 predictCost 107
 VLPROFEXEC MTASK_BEGIN 20650 id 9 predictStart 167 cpu 10
-VLPROFEXEC MTASK_END 20720 id 9 predictCost 30
+VLPROFEXEC MTASK_END 20720 predictCost 30
 VLPROFEXEC MTASK_BEGIN 21140 id 11 predictStart 197 cpu 10
-VLPROFEXEC MTASK_END 21245 id 11 predictCost 30
+VLPROFEXEC MTASK_END 21245 predictCost 30
 VLPROFEXEC THREAD_SCHEDULE_WAIT_BEGIN 22000 cpu 10
 VLPROFEXEC THREAD_SCHEDULE_WAIT_END 23000 cpu 10
 VLPROF stat ticks 23415
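
Note: the expected report for this trace (next file) follows from these ticks; a quick hand check, assuming all values are in ticks as in the file (illustration only):

    import math

    # mtask 6 now encloses the nested hier-block mtask, so over both graph
    # executions its elapsed time is (5905 - 2695) + (15820 - 15610).
    elapsed_mtask6 = (5905 - 2695) + (15820 - 15610)
    assert elapsed_mtask6 == 3420  # report: "elapsed 3420"
    assert round(math.log(30 / 3420), 3) == -4.736  # report: "min log(p2e) = -4.736"
    # Parallelized (exec graph) time: (12180 - 945) + (22085 - 14000) = 19320
    exec_graph_time = (12180 - 945) + (22085 - 14000)
    assert round(elapsed_mtask6 / exec_graph_time, 4) == 0.177  # report: "17.70%"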

View File

@@ -10,42 +10,42 @@ Summary:
   Waiting time = 8.54% of elapsed time
   Total threads = 2
   Total CPUs used = 2
-  Total mtasks = 7
+  Total mtasks = 8
   Total yields = 0
 NUMA assignment:
   NUMA status = 0,1,4,5;2,3,6,7
 Parallelized code, measured:
-  Thread utilization = 14.22%
-  Speedup = 0.284x
+  Thread utilization = 24.72%
+  Speedup = 0.494x
 Parallelized code, predicted during static scheduling:
-  Thread utilization = 63.22%
-  Speedup = 1.26x
+  Thread utilization = 69.82%
+  Speedup = 1.4x
 All code, measured:
-  Thread utilization = 20.48%
-  Speedup = 0.41x
+  Thread utilization = 29.14%
+  Speedup = 0.583x
 All code, measured, scaled by predicted speedup:
-  Thread utilization = 56.80%
-  Speedup = 1.14x
+  Thread utilization = 62.40%
+  Speedup = 1.25x
 MTask statistics:
-  Longest mtask id = 5
-  Longest mtask time = 6.16% of time elapsed in parallelized code
-  min log(p2e) = -3.681 from mtask 5 (predict 30, elapsed 1190)
+  Longest mtask id = 6
+  Longest mtask time = 17.70% of time elapsed in parallelized code
+  min log(p2e) = -4.736 from mtask 6 (predict 30, elapsed 3420)
   max log(p2e) = -2.409 from mtask 8 (predict 107, elapsed 1190)
-  mean = -2.992
-  stddev = 0.459
-  e ^ stddev = 1.583
+  mean = -3.325
+  stddev = 0.692
+  e ^ stddev = 1.998
 CPU info:
    Id | Time spent executing MTask | Socket | Core | Model
       | % of elapsed ticks / ticks |        |      |
 ====|============================|========|======|======
    10 | 20.18% / 4725              |      0 |   10 | Test Ryzen 9 3950X 16-Core Processor
-   19 | 3.29% / 770                |      0 |    3 | Test Ryzen 9 3950X 16-Core Processor
+   19 | 20.61% / 4825              |      0 |    3 | Test Ryzen 9 3950X 16-Core Processor
 Writing profile_exec.vcd

View File

@@ -44,15 +44,20 @@ b110 v8
 b101 v9
 b1 va
 b10 vb
-#2905
-bz v0
-bz v1
-bz v2
-b0 va
+#3795
+b1010 v0
+b0 v1
+b1010 v2
+b10 va
 #3800
 bz v8
 b111 v9
 b1 vb
+#4850
+bz v0
+bz v1
+bz v2
+b1 va
 #4906
 b1000 v9
 b1 vb
@@ -60,6 +65,11 @@ b1 vb
 b101 v3
 b1 v4
 b101 v5
 b10 va
+#5905
+bz v0
+bz v1
+bz v2
+b1 va
 #6090
 bz v3
@@ -111,10 +121,10 @@ bz v2
 b0 va
 #9917
 b1010 v8
-b10 vb
+b11 vb
 #9954
 b1011 v9
-b10 vb
+b11 vb
 #10255
 b1011 v3
 b1 v4
@@ -199,10 +209,10 @@ b0 va
 bz v6
 #21019
 b1010 v8
-b10 vb
+b11 vb
 #21047
 b1011 v9
-b10 vb
+b11 vb
 #21140
 b1011 v3
 b1 v4

View File

@@ -47,19 +47,19 @@ VLPROFPROC
 VLPROFTHREAD 0
 VLPROFEXEC EXEC_GRAPH_BEGIN 58532
 VLPROFEXEC MTASK_BEGIN 90465 id 85 predictStart 14315 cpu 2
-VLPROFEXEC MTASK_END 155034 id 85 predictCost 30533
+VLPROFEXEC MTASK_END 155034 predictCost 30533
 VLPROFEXEC MTASK_BEGIN 156555 id 79 predictStart 44848 cpu 2
-VLPROFEXEC MTASK_END 294309 id 79 predictCost 48001
+VLPROFEXEC MTASK_END 294309 predictCost 48001
 VLPROFEXEC THREAD_SCHEDULE_WAIT_BEGIN 295000 cpu 2
 VLPROFEXEC THREAD_SCHEDULE_WAIT_END 296000 cpu 2
 VLPROFEXEC EXEC_GRAPH_END 300000
 VLPROFTHREAD 1
 VLPROFEXEC MTASK_BEGIN 77352 id 90 predictStart 14315 cpu 3
-VLPROFEXEC MTASK_END 78511 id 90 predictCost 21592
+VLPROFEXEC MTASK_END 78511 predictCost 21592
 VLPROFEXEC MTASK_BEGIN 79799 id 81 predictStart 35907 cpu 3
-VLPROFEXEC MTASK_END 80667 id 81 predictCost 29215
+VLPROFEXEC MTASK_END 80667 predictCost 29215
 VLPROFEXEC MTASK_BEGIN 81746 id 87 predictStart 65147 cpu 3
-VLPROFEXEC MTASK_END 82633 id 87 predictCost 33809
+VLPROFEXEC MTASK_END 82633 predictCost 33809
 VLPROFEXEC THREAD_SCHEDULE_WAIT_BEGIN 83000 cpu 3
 VLPROFEXEC THREAD_SCHEDULE_WAIT_END 84000 cpu 3
 VLPROF stat ticks 300000

View File

@@ -6,39 +6,39 @@ VLPROF stat yields 0
 VLPROFTHREAD 0
 VLPROFEXEC EXEC_GRAPH_BEGIN 945
 VLPROFEXEC MTASK_BEGIN 2695 id 6 predictStart 0 cpu 19
-VLPROFEXEC MTASK_END 2905 id 6 predictCost 30
+VLPROFEXEC MTASK_END 2905 predictCost 30
 VLPROFEXEC MTASK_BEGIN 9695 id 10 predictStart 196 cpu 19
-VLPROFEXEC MTASK_END 9870 id 10 predictCost 30
+VLPROFEXEC MTASK_END 9870 predictCost 30
 VLPROFEXEC EXEC_GRAPH_END 12180
 VLPROFEXEC EXEC_GRAPH_BEGIN 14000
 VLPROFEXEC MTASK_BEGIN 15610 id 6 predictStart 0 cpu 19
-VLPROFEXEC MTASK_END 15820 id 6 predictCost 30
+VLPROFEXEC MTASK_END 15820 predictCost 30
 VLPROFEXEC THREAD_SCHEDULE_WAIT_BEGIN 16000 cpu 19
 VLPROFEXEC THREAD_SCHEDULE_WAIT_END 17000 cpu 19
 VLPROFEXEC MTASK_BEGIN 21700 id 10 predictStart 196 cpu 19
-VLPROFEXEC MTASK_END 21875 id 10 predictCost 30
+VLPROFEXEC MTASK_END 21875 predictCost 30
 VLPROFEXEC EXEC_GRAPH_END 22085
 VLPROFTHREAD 1
 VLPROFEXEC MTASK_BEGIN 5495 id 5 predictStart 0 cpu 10
-VLPROFEXEC MTASK_END 6090 id 5 predictCost 30
+VLPROFEXEC MTASK_END 6090 predictCost 30
 VLPROFEXEC MTASK_BEGIN 6300 id 7 predictStart 30 cpu 10
-VLPROFEXEC MTASK_END 6895 id 7 predictCost 30
+VLPROFEXEC MTASK_END 6895 predictCost 30
 VLPROFEXEC MTASK_BEGIN 7490 id 8 predictStart 60 cpu 10
-VLPROFEXEC MTASK_END 8540 id 8 predictCost 107
+VLPROFEXEC MTASK_END 8540 predictCost 107
 VLPROFEXEC MTASK_BEGIN 9135 id 9 predictStart 167 cpu 10
-VLPROFEXEC MTASK_END 9730 id 9 predictCost 30
+VLPROFEXEC MTASK_END 9730 predictCost 30
 VLPROFEXEC MTASK_BEGIN 10255 id 11 predictStart 197 cpu 10
-VLPROFEXEC MTASK_END 11060 id 11 predictCost 30
+VLPROFEXEC MTASK_END 11060 predictCost 30
 VLPROFEXEC THREAD_SCHEDULE_WAIT_BEGIN 17000 cpu 10
 VLPROFEXEC THREAD_SCHEDULE_WAIT_END 18000 cpu 10
 VLPROFEXEC MTASK_BEGIN 18375 id 5 predictStart 0 cpu 10
-VLPROFEXEC MTASK_END 18970 id 5 predictCost 30
+VLPROFEXEC MTASK_END 18970 predictCost 30
 VLPROFEXEC MTASK_BEGIN 19145 id 7 predictStart 30 cpu 10
-VLPROFEXEC MTASK_END 19320 id 7 predictCost 30
+VLPROFEXEC MTASK_END 19320 predictCost 30
 VLPROFEXEC MTASK_BEGIN 19670 id 8 predictStart 60 cpu 10
-VLPROFEXEC MTASK_END 19810 id 8 predictCost 107
+VLPROFEXEC MTASK_END 19810 predictCost 107
 VLPROFEXEC MTASK_BEGIN 20650 id 9 predictStart 167 cpu 10
-VLPROFEXEC MTASK_END 20720 id 9 predictCost 30
+VLPROFEXEC MTASK_END 20720 predictCost 30
 VLPROFEXEC MTASK_BEGIN 21140 id 11 predictStart 197 cpu 10
-VLPROFEXEC MTASK_END 21245 id 11 predictCost 30
+VLPROFEXEC MTASK_END 21245 predictCost 30
 VLPROF stat ticks 23415