Mirror of https://gitlab.com/ita1024/waf.git, synced 2024-11-22 09:57:15 +01:00

Provide TaskGroups to replace mem_reducer

This commit is contained in:
parent c28d7fca83
commit 63a53064a3
ChangeLog
@@ -1,6 +1,7 @@
 NEW IN WAF 2.0.0
 ----------------
 * Provide a new priority system to improve scalability on complex builds
+* Provide TaskGroup objects to improve scalability on complex builds
 * Force new files into the build directory by default (use Node objects to bypass)
 * Simplify the Task class hierarchy; TaskBase is removed
 * New ant_glob(..., generator=True) now returns a Python generator
@@ -14,4 +15,5 @@ NEW IN WAF 2.0.0
 * Remove atleast-version, exact-version and max-version from conf.check_cfg
 * Remove c_preproc.trimquotes
 * Remove field_name, type_name, function_name from conf.check() tests
+* Remove extras/mem_reducer.py as a better solution has been merged
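Why TaskGroup objects help: when every task in a group B must run after every task in a group A, recording the constraint on each task separately costs len(A) * len(B) set entries, while one shared group object costs len(A) + len(B). A toy sketch of the difference (the Tsk and Group classes below are illustrative stand-ins, not waf's API):

class Tsk:
    def __init__(self):
        self.run_after = set()

class Group:
    # one shared object standing for "all of a before all of b"
    def __init__(self, a, b):
        self.a, self.b = set(a), set(b)

a = [Tsk() for _ in range(1000)]
b = [Tsk() for _ in range(1000)]

# naive encoding: every task in b stores every task in a
for x in b:
    x.run_after.update(a)
print(sum(len(x.run_after) for x in b))  # 1000000 stored references

# grouped encoding: one object, referenced once per consumer
g = Group(a, b)
for x in b:
    x.run_after = {g}
print(sum(len(x.run_after) for x in b))  # 1000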
waflib/Runner.py
@@ -137,7 +137,7 @@ class Parallel(object):
         Flag that indicates that the build cache must be saved when a task was executed
         (calls :py:meth:`waflib.Build.BuildContext.store`)"""

-        self.revdeps = Utils.defaultdict(list)
+        self.revdeps = Utils.defaultdict(set)
         """
         The reverse dependency graph of dependencies obtained from Task.run_after
         """
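Switching revdeps from defaultdict(list) to defaultdict(set) matters for the hunks below: a TaskGroup is registered once per producer it watches, and repeated registrations must collapse into a single reverse edge. A minimal sketch of building such a reverse map with the standard library (Tsk is an illustrative stand-in for a waf task):

from collections import defaultdict

class Tsk:
    def __init__(self, name, run_after=()):
        self.name = name
        self.run_after = set(run_after)

t1 = Tsk('t1')
t2 = Tsk('t2', [t1])
t3 = Tsk('t3', [t1, t2])

revdeps = defaultdict(set)
for x in (t1, t2, t3):
    for k in x.run_after:
        revdeps[k].add(x)  # a set absorbs duplicate registrations

print(sorted(t.name for t in revdeps[t1]))  # ['t2', 't3']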
@@ -239,18 +239,32 @@ class Parallel(object):

     def mark_finished(self, tsk):
         # we assume that frozen tasks will be consumed as the build goes
+
+        def try_unfreeze(x):
+            # ancestors are likely to be frozen
+            if x in self.frozen:
+                # TODO remove dependencies to free some memory?
+                # x.run_after.remove(tsk)
+                for k in x.run_after:
+                    if not k.hasrun:
+                        break
+                else:
+                    self.frozen.remove(x)
+                    self.insert_with_prio(x)
+
         if tsk in self.revdeps:
             for x in self.revdeps[tsk]:
-                # ancestors are likely to be frozen
-                if x in self.frozen:
-                    # TODO remove dependencies to free some memory?
-                    # x.run_after.remove(tsk)
-                    for k in x.run_after:
-                        if not k.hasrun:
-                            break
-                    else:
-                        self.frozen.remove(x)
-                        self.insert_with_prio(x)
+                if isinstance(x, Task.TaskGroup):
+                    x.a.remove(tsk)
+                    if not x.a:
+                        for k in x.b:
+                            # TODO necessary optimization?
+                            k.run_after.remove(x)
+                            try_unfreeze(k)
+                        # TODO necessary optimization?
+                        x.b = []
+                else:
+                    try_unfreeze(x)
             del self.revdeps[tsk]

     def get_out(self):
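The for/else in try_unfreeze above carries the whole wake-up logic: the else branch runs only if the loop never hit break, meaning no predecessor is still pending, so the task may leave the frozen list. A standalone illustration of the idiom with stub objects (not the Parallel class itself):

class Stub:
    def __init__(self, hasrun=False, run_after=()):
        self.hasrun = hasrun
        self.run_after = set(run_after)

done = Stub(hasrun=True)
pending = Stub()
candidate = Stub(run_after={done, pending})

def ready(x):
    for k in x.run_after:
        if not k.hasrun:
            break     # at least one predecessor still pending
    else:
        return True   # the loop found no pending predecessor
    return False

print(ready(candidate))  # False
pending.hasrun = True
print(ready(candidate))  # True: the task can be scheduled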
@@ -428,10 +442,18 @@ class Parallel(object):

         for x in tasks:
             for k in x.run_after:
-                reverse[k].append(x)
+                if isinstance(k, Task.TaskGroup):
+                    for j in k.a:
+                        # use the group!
+                        reverse[j].add(k)
+                else:
+                    reverse[k].add(x)

         # the priority number is not the tree size
         def visit(n):
+            if isinstance(n, Task.TaskGroup):
+                return sum(visit(k) for k in n.b)
+
             if n.visited == 0:
                 n.visited = 1
                 if n in reverse:
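In this priority pass a TaskGroup is transparent: its weight is the sum over the tasks it gates (its b side), so consumers behind a group still raise the priority of their producers. A condensed sketch of the same recursion, with a memo dict standing in for the visited flags of the real code (Tsk and Grp are illustrative):

class Tsk:
    pass

class Grp:
    def __init__(self, a, b):
        self.a, self.b = set(a), set(b)

def visit(n, reverse, memo):
    if isinstance(n, Grp):
        # a group weighs as much as everything it gates
        return sum(visit(k, reverse, memo) for k in n.b)
    if n not in memo:
        memo[n] = 1 + sum(visit(k, reverse, memo) for k in reverse.get(n, ()))
    return memo[n]

producer, c1, c2 = Tsk(), Tsk(), Tsk()
g = Grp([producer], [c1, c2])
reverse = {producer: {g}}  # as in the diff: producers point at the group
print(visit(producer, reverse, {}))  # 3 = itself plus the two consumers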
@@ -473,6 +495,9 @@ class Parallel(object):
             tmp[x] = 0

         def visit(n, acc):
+            if isinstance(n, Task.TaskGroup):
+                for k in n.b:
+                    visit(k, acc)
             if tmp[n] == 0:
                 tmp[n] = 1
                 for k in reverse.get(n, []):
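This second visit walks the same graph for error reporting; the tmp markers implement a depth-first cycle check (0 unvisited, 1 on the current path), and a group again just forwards to the tasks it gates. A complete standalone version of such a check (the error handling is simplified, and the state-2 bookkeeping is an assumption not visible in this hunk):

def find_cycle(tasks, reverse):
    tmp = {t: 0 for t in tasks}  # 0 unvisited, 1 in progress, 2 done
    def visit(n):
        if tmp[n] == 1:
            raise ValueError('dependency cycle detected')
        if tmp[n] == 0:
            tmp[n] = 1
            for k in reverse.get(n, ()):
                visit(k)
            tmp[n] = 2
    for t in tasks:
        visit(t)

find_cycle(['a', 'b'], {'a': ['b']})  # completes quietly
try:
    find_cycle(['a', 'b'], {'a': ['b'], 'b': ['a']})
except ValueError as e:
    print(e)  # dependency cycle detected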
waflib/Task.py
@@ -879,6 +879,28 @@ def set_file_constraints(tasks):
         for a in ins[k]:
             a.run_after.update(outs[k])

+
+class TaskGroup(object):
+    """
+    Wrap nxm task order constraints into a single object
+    to prevent the creation of large list/set objects
+
+    This is an optimization
+    """
+    def __init__(self, a, b):
+        self.a = a
+        self.b = b
+
+    def get_hasrun(self):
+        if not self.a:
+            return SUCCESS
+        for k in self.a:
+            if not k.hasrun:
+                return NOT_RUN
+        return SUCCESS
+
+    hasrun = property(get_hasrun, None)
+
 def set_precedence_constraints(tasks):
     """
     Updates the ``run_after`` attribute of all tasks based on the after/before/ext_out/ext_in attributes
@@ -910,9 +932,16 @@ def set_precedence_constraints(tasks):
             else:
                 continue

-            aval = set(cstr_groups[keys[a]])
-            for x in cstr_groups[keys[b]]:
-                x.run_after.update(aval)
+            a = cstr_groups[keys[a]]
+            b = cstr_groups[keys[b]]
+
+            if len(a) < 2 or len(b) < 2:
+                for x in b:
+                    x.run_after.update(a)
+            else:
+                group = TaskGroup(set(a), set(b))
+                for x in b:
+                    x.run_after.add(group)

 def funex(c):
     """
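Note the len(a) < 2 or len(b) < 2 guard: for a 1-to-m or n-to-1 relation the plain run_after entries are already as small as possible, so a group only pays off for genuine nxm constraints. The group then acts as a pseudo-task whose hasrun flips once its whole a side has run; a quick check with stubs (Stub is illustrative, NOT_RUN and SUCCESS mirror the waflib.Task constants 0 and 9):

NOT_RUN, SUCCESS = 0, 9

class Stub:
    def __init__(self):
        self.hasrun = NOT_RUN
        self.run_after = set()

class TaskGroup:
    def __init__(self, a, b):
        self.a, self.b = a, b
    @property
    def hasrun(self):
        for k in self.a:
            if not k.hasrun:
                return NOT_RUN
        return SUCCESS

producers = {Stub(), Stub()}
consumers = {Stub(), Stub(), Stub()}
g = TaskGroup(producers, consumers)
for x in consumers:
    x.run_after.add(g)  # a 2x3 relation stored as a single object

print(g.hasrun)  # 0 while nothing has run
for p in producers:
    p.hasrun = SUCCESS
print(g.hasrun)  # 9 once the whole a side is done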
waflib/extras/mem_reducer.py (deleted)
@@ -1,111 +0,0 @@
-#! /usr/bin/env python
-# encoding: UTF-8
-
-"""
-This tool can help to reduce the memory usage in very large builds featuring many tasks with after/before attributes.
-It may also improve the overall build time by decreasing the amount of iterations over tasks.
-
-Usage:
-def options(opt):
-    opt.load('mem_reducer')
-"""
-
-import itertools
-from waflib import Utils, Task, Runner
-
-class SetOfTasks(object):
-    """Wraps a set and a task which has a list of other sets.
-    The interface is meant to mimic the interface of set. Add missing functions as needed.
-    """
-    def __init__(self, owner):
-        self._set = owner.run_after
-        self._owner = owner
-
-    def __iter__(self):
-        for g in self._owner.run_after_groups:
-            #print len(g)
-            for task in g:
-                yield task
-        for task in self._set:
-            yield task
-
-    def add(self, obj):
-        self._set.add(obj)
-
-    def update(self, obj):
-        self._set.update(obj)
-
-def set_precedence_constraints(tasks):
-    cstr_groups = Utils.defaultdict(list)
-    for x in tasks:
-        x.run_after = SetOfTasks(x)
-        x.run_after_groups = []
-        x.waiting_sets = []
-
-        h = x.hash_constraints()
-        cstr_groups[h].append(x)
-
-    # create sets which can be reused for all tasks
-    for k in cstr_groups.keys():
-        cstr_groups[k] = set(cstr_groups[k])
-
-    # this list should be short
-    for key1, key2 in itertools.combinations(cstr_groups.keys(), 2):
-        group1 = cstr_groups[key1]
-        group2 = cstr_groups[key2]
-        # get the first entry of the set
-        t1 = next(iter(group1))
-        t2 = next(iter(group2))
-
-        # add the constraints based on the comparisons
-        if Task.is_before(t1, t2):
-            for x in group2:
-                x.run_after_groups.append(group1)
-            for k in group1:
-                k.waiting_sets.append(group1)
-        elif Task.is_before(t2, t1):
-            for x in group1:
-                x.run_after_groups.append(group2)
-            for k in group2:
-                k.waiting_sets.append(group2)
-
-Task.set_precedence_constraints = set_precedence_constraints
-
-def get_out(self):
-    tsk = self.out.get()
-    if not self.stop:
-        self.add_more_tasks(tsk)
-    self.count -= 1
-    self.dirty = True
-
-    # shrinking sets
-    try:
-        ws = tsk.waiting_sets
-    except AttributeError:
-        pass
-    else:
-        for k in ws:
-            try:
-                k.remove(tsk)
-            except KeyError:
-                pass
-
-    return tsk
-Runner.Parallel.get_out = get_out
-
-def skip(self, tsk):
-    tsk.hasrun = Task.SKIPPED
-
-    # shrinking sets
-    try:
-        ws = tsk.waiting_sets
-    except AttributeError:
-        pass
-    else:
-        for k in ws:
-            try:
-                k.remove(tsk)
-            except KeyError:
-                pass
-Runner.Parallel.skip = skip
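For comparison, the removed tool attacked the same problem from the outside: its SetOfTasks made run_after a proxy that chains shared per-group sets with the task's own set, so iteration sees every predecessor without ever materializing the nxm product. The core of that idea fits in a few lines (a condensed sketch, not the original class):

import itertools

class SetOfTasks:
    def __init__(self, own, groups):
        self._set = set(own)   # the task's own predecessors
        self._groups = groups  # shared sets, stored once per group
    def __iter__(self):
        return itertools.chain(
            itertools.chain.from_iterable(self._groups), self._set)

shared = {'a', 'b'}
s = SetOfTasks({'c'}, [shared])
print(sorted(s))  # ['a', 'b', 'c']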