[U-Boot] [PATCH 1/2] WIP: Changes to patman libraries

These changes are required in the patman libraries. This is not a proper patch yet, just something to try out.
Signed-off-by: Simon Glass sjg@chromium.org --- tools/patman/checkpatch.py | 2 - tools/patman/command.py | 86 ++++++-- tools/patman/cros_subprocess.py | 402 +++++++++++++++++++++++++++++++++++++++ tools/patman/gitutil.py | 129 ++++++++++++- tools/patman/patchstream.py | 38 +++- tools/patman/terminal.py | 30 ++- 6 files changed, 642 insertions(+), 45 deletions(-) create mode 100644 tools/patman/cros_subprocess.py
diff --git a/tools/patman/checkpatch.py b/tools/patman/checkpatch.py index d831087..4b6748a 100644 --- a/tools/patman/checkpatch.py +++ b/tools/patman/checkpatch.py @@ -70,8 +70,6 @@ def CheckPatch(fname, verbose=False): '~/bin directory') item = {} stdout = command.Output(chk, '--no-tree', fname) - #pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE) - #stdout, stderr = pipe.communicate()
# total: 0 errors, 0 warnings, 159 lines checked re_stats = re.compile('total: (\d+) errors, (\d+) warnings, (\d+)') diff --git a/tools/patman/command.py b/tools/patman/command.py index 4b00250..a67ade3 100644 --- a/tools/patman/command.py +++ b/tools/patman/command.py @@ -20,53 +20,95 @@ #
import os -import subprocess +import cros_subprocess
"""Shell command ease-ups for Python."""
-def RunPipe(pipeline, infile=None, outfile=None, - capture=False, oneline=False, hide_stderr=False): +class CommandResult: + """A class which captures the result of executing a command. + + Members: + stdout: stdout obtained from command, as a string + stderr: stderr obtained from command, as a string + return_code: Return code from command + exception: Exception received, or None if all ok + """ + def __init__(self): + self.stdout = None + self.stderr = None + self.return_code = None + self.exception = None + + +def RunPipe(pipe_list, infile=None, outfile=None, + capture=False, capture_stderr=False, oneline=False, + raise_on_error=True, cwd=None, **kwargs): """ Perform a command pipeline, with optional input/output filenames.
- hide_stderr Don't allow output of stderr (default False) + Args: + pipe_list: List of command lines to execute. Each command line is + piped into the next, and is itself a list of strings. For + example [ ['ls', '.git'] ['wc'] ] will pipe the output of + 'ls .git' into 'wc'. + infile: File to provide stdin to the pipeline + outfile: File to store stdout + capture: True to capture output + capture_stderr: True to capture stderr + oneline: True to strip newline chars from output + kwargs: Additional keyword arguments to cros_subprocess.Popen() + Returns: + CommandResult object """ + result = CommandResult() last_pipe = None + pipeline = list(pipe_list) while pipeline: cmd = pipeline.pop(0) - kwargs = {} if last_pipe is not None: kwargs['stdin'] = last_pipe.stdout elif infile: kwargs['stdin'] = open(infile, 'rb') if pipeline or capture: - kwargs['stdout'] = subprocess.PIPE + kwargs['stdout'] = cros_subprocess.PIPE elif outfile: kwargs['stdout'] = open(outfile, 'wb') - if hide_stderr: - kwargs['stderr'] = open('/dev/null', 'wb') + if capture_stderr: + kwargs['stderr'] = cros_subprocess.PIPE
- last_pipe = subprocess.Popen(cmd, **kwargs) + try: + last_pipe = cros_subprocess.Popen(cmd, cwd=cwd, **kwargs) + except Exception, err: + result.exception = err + print 'exception', pipe_list, err + raise Exception("Error running '%s': %s" % (pipe_list, str(err)))
if capture: - ret = last_pipe.communicate()[0] - if not ret: - return None - elif oneline: - return ret.rstrip('\r\n') - else: - return ret + result.stdout, result.stderr, result.combined = ( + last_pipe.CommunicateFilter(None)) + if result.stdout and oneline: + result.output = result.stdout.rstrip('\r\n') + result.return_code = last_pipe.wait() else: - return os.waitpid(last_pipe.pid, 0)[1] == 0 + result.return_code = os.waitpid(last_pipe.pid, 0)[1] + if raise_on_error and result.return_code: + raise Exception("Error running '%s'" % pipe_list) + return result
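As an example of the new interface, a caller might use RunPipe() and the CommandResult it returns something like this (illustrative only, not part of the patch; the command line is arbitrary):

    import command

    # Pipe 'git log --oneline' into 'wc -l' and capture the output
    result = command.RunPipe([['git', 'log', '--oneline'], ['wc', '-l']],
                             capture=True, capture_stderr=True,
                             raise_on_error=False)
    if result.return_code:
        print 'failed:', result.stderr
    else:
        print 'commit count:', result.stdout.strip()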
def Output(*cmd): - return RunPipe([cmd], capture=True) + return RunPipe([cmd], capture=True, raise_on_error=False).stdout
-def OutputOneLine(*cmd): - return RunPipe([cmd], capture=True, oneline=True) +def OutputOneLine(*cmd, **kwargs): + raise_on_error = kwargs.pop('raise_on_error', True) + return (RunPipe([cmd], capture=True, oneline=True, + raise_on_error=raise_on_error, + **kwargs).stdout.strip())
def Run(*cmd, **kwargs): - return RunPipe([cmd], **kwargs) + return RunPipe([cmd], **kwargs).stdout
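The wrappers above still return plain strings, so existing callers do not need to change. For instance (illustrative only), the gitutil.GetAliasFile() change later in this patch relies on being able to pass raise_on_error through OutputOneLine():

    import command

    # Captures stdout; does not raise if the command fails
    version = command.Output('git', '--version')

    # A missing config entry should not be fatal here
    alias_fname = command.OutputOneLine('git', 'config',
                                        'sendemail.aliasesfile',
                                        raise_on_error=False)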
def RunList(cmd): - return RunPipe([cmd], capture=True) + return RunPipe([cmd], capture=True).stdout + +def StopAll(): + cros_subprocess.stay_alive = False diff --git a/tools/patman/cros_subprocess.py b/tools/patman/cros_subprocess.py new file mode 100644 index 0000000..8b89387 --- /dev/null +++ b/tools/patman/cros_subprocess.py @@ -0,0 +1,402 @@ +#!/usr/bin/python + +# Copyright (c) 2011 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# +# Copyright (c) 2003-2005 by Peter Astrand astrand@lysator.liu.se +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/2.4/license for licensing details. + +"""Subprocress execution + +This module holds a subclass of subprocess.Popen with our own required +features. +""" + +#TODO: Fix up indentation + +import errno +import os +import pty +import select +import subprocess +import sys +import unittest + + +# Import these here so the caller does not need to import subprocess also. +PIPE = subprocess.PIPE +STDOUT = subprocess.STDOUT +PIPE_PTY = -3 # Pipe output through a pty +stay_alive = True + + +class Popen(subprocess.Popen): + """Like subprocess.Popen with ptys and incremental output + + This class deals with running a child process and filtering its output on + both stdout and stderr while it is running. We do this so we can monitor + progress, and possibly relay the output to the user if requested. + + The class is similar to subprocess.Popen, the equivalent is something like: + + Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + But this class has many fewer features, and two enhancement: + + 1. Rather than getting the output data only at the end, this class sends it + to a provided operation as it arrives. + 2. We use pseudo terminals so that the child will hopefully flush its output + to us as soon as it is produced, rather than waiting for the end of a + line. + + Use CommunicateFilter() to handle output from the subprocess. + + """ + + def __init__(self, args, stdin=None, stdout=PIPE_PTY, stderr=PIPE_PTY, + shell=False, cwd=None, env=None, **kwargs): + """Cut-down constructor + + Args: + args: Program and arguments for subprocess to execute. + stdin: See subprocess.Popen() + stdout: See subprocess.Popen(), except that we support the sentinel + value of cros_subprocess.PIPE_PTY. + stderr: See subprocess.Popen(), except that we support the sentinel + value of cros_subprocess.PIPE_PTY. + shell: See subprocess.Popen() + cwd: Working directory to change to for subprocess, or None if none. + env: Environment to use for this subprocess, or None to inherit parent. + kwargs: No other arguments are supported at the moment. Passing other + arguments will cause a ValueError to be raised. + """ + stdout_pty = None + stderr_pty = None + + if stdout == PIPE_PTY: + stdout_pty = pty.openpty() + stdout = os.fdopen(stdout_pty[1]) + if stderr == PIPE_PTY: + stderr_pty = pty.openpty() + stderr = os.fdopen(stderr_pty[1]) + + super(Popen, self).__init__(args, stdin=stdin, + stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env, + **kwargs) + + # If we're on a PTY, we passed the slave half of the PTY to the subprocess. + # We want to use the master half on our end from now on. Setting this here + # does make some assumptions about the implementation of subprocess, but + # those assumptions are pretty minor. + + # Note that if stderr is STDOUT, then self.stderr will be set to None by + # this constructor. 
+ if stdout_pty is not None: + self.stdout = os.fdopen(stdout_pty[0]) + if stderr_pty is not None: + self.stderr = os.fdopen(stderr_pty[0]) + + # Insist that unit tests exist for other arguments we don't support. + if kwargs: + raise ValueError("Unit tests do not test extra args - please add tests") + + def CommunicateFilter(self, output): + """Interact with process: Read data from stdout and stderr. + + This method runs until end-of-file is reached, then waits for the + subprocess to terminate. + + The output function is sent all output from the subprocess and must be + defined like this: + + def Output([self,] stream, data) + Args: + stream: the stream the output was received on, which will be + sys.stdout or sys.stderr. + data: a string containing the data + + Note: The data read is buffered in memory, so do not use this + method if the data size is large or unlimited. + + Args: + output: Function to call with each fragment of output. + + Returns: + A tuple (stdout, stderr, combined) which is the data received on + stdout, stderr and the combined data (interleaved stdout and stderr). + + Note that the interleaved output will only be sensible if you have + set both stdout and stderr to PIPE or PIPE_PTY. Even then it depends on + the timing of the output in the subprocess. If a subprocess flips + between stdout and stderr quickly in succession, by the time we come to + read the output from each we may see several lines in each, and will read + all the stdout lines, then all the stderr lines. So the interleaving + may not be correct. In this case you might want to pass + stderr=cros_subprocess.STDOUT to the constructor. + + This feature is still useful for subprocesses where stderr is + rarely used and indicates an error. + + Note also that if you set stderr to STDOUT, then stderr will be empty + and the combined output will just be the same as stdout. + """ + + read_set = [] + write_set = [] + stdout = None # Return + stderr = None # Return + + if self.stdin: + # Flush stdio buffer. This might block, if the user has + # been writing to .stdin in an uncontrolled fashion. + self.stdin.flush() + if input: + write_set.append(self.stdin) + else: + self.stdin.close() + if self.stdout: + read_set.append(self.stdout) + stdout = [] + if self.stderr and self.stderr != self.stdout: + read_set.append(self.stderr) + stderr = [] + combined = [] + + input_offset = 0 + while read_set or write_set: + try: + rlist, wlist, _ = select.select(read_set, write_set, [], 0.2) + except select.error, e: + if e.args[0] == errno.EINTR: + continue + raise + + if not stay_alive: + self.terminate() + + if self.stdin in wlist: + # When select has indicated that the file is writable, + # we can write up to PIPE_BUF bytes without risk + # blocking. 
POSIX defines PIPE_BUF >= 512 + chunk = input[input_offset : input_offset + 512] + bytes_written = os.write(self.stdin.fileno(), chunk) + input_offset += bytes_written + if input_offset >= len(input): + self.stdin.close() + write_set.remove(self.stdin) + + if self.stdout in rlist: + data = "" + # We will get an error on read if the pty is closed + try: + data = os.read(self.stdout.fileno(), 1024) + except OSError: + pass + if data == "": + self.stdout.close() + read_set.remove(self.stdout) + else: + stdout.append(data) + combined.append(data) + if output: + output(sys.stdout, data) + if self.stderr in rlist: + data = "" + # We will get an error on read if the pty is closed + try: + data = os.read(self.stderr.fileno(), 1024) + except OSError: + pass + if data == "": + self.stderr.close() + read_set.remove(self.stderr) + else: + stderr.append(data) + combined.append(data) + if output: + output(sys.stderr, data) + + # All data exchanged. Translate lists into strings. + if stdout is not None: + stdout = ''.join(stdout) + else: + stdout = '' + if stderr is not None: + stderr = ''.join(stderr) + else: + stderr = '' + combined = ''.join(combined) + + # Translate newlines, if requested. We cannot let the file + # object do the translation: It is based on stdio, which is + # impossible to combine with select (unless forcing no + # buffering). + if self.universal_newlines and hasattr(file, 'newlines'): + if stdout: + stdout = self._translate_newlines(stdout) + if stderr: + stderr = self._translate_newlines(stderr) + + self.wait() + return (stdout, stderr, combined) + + +# Just being a unittest.TestCase gives us 14 public methods. Unless we +# disable this, we can only have 6 tests in a TestCase. That's not enough. +# +# pylint: disable=R0904 + +class TestSubprocess(unittest.TestCase): + """Our simple unit test for this module""" + + class MyOperation: + """Provides a operation that we can pass to Popen""" + def __init__(self, input_to_send=None): + """Constructor to set up the operation and possible input. + + Args: + input_to_send: a text string to send when we first get input. We will + add \r\n to the string. + """ + self.stdout_data = '' + self.stderr_data = '' + self.combined_data = '' + self.stdin_pipe = None + self._input_to_send = input_to_send + if input_to_send: + pipe = os.pipe() + self.stdin_read_pipe = pipe[0] + self._stdin_write_pipe = os.fdopen(pipe[1], 'w') + + def Output(self, stream, data): + """Output handler for Popen. Stores the data for later comparison""" + if stream == sys.stdout: + self.stdout_data += data + if stream == sys.stderr: + self.stderr_data += data + self.combined_data += data + + # Output the input string if we have one. 
+ if self._input_to_send: + self._stdin_write_pipe.write(self._input_to_send + '\r\n') + self._stdin_write_pipe.flush() + + def _BasicCheck(self, plist, oper): + """Basic checks that the output looks sane.""" + self.assertEqual(plist[0], oper.stdout_data) + self.assertEqual(plist[1], oper.stderr_data) + self.assertEqual(plist[2], oper.combined_data) + + # The total length of stdout and stderr should equal the combined length + self.assertEqual(len(plist[0]) + len(plist[1]), len(plist[2])) + + def test_simple(self): + """Simple redirection: Get process list""" + oper = TestSubprocess.MyOperation() + plist = Popen(['ps']).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + + def test_stderr(self): + """Check stdout and stderr""" + oper = TestSubprocess.MyOperation() + cmd = 'echo fred >/dev/stderr && false || echo bad' + plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(plist [0], 'bad\r\n') + self.assertEqual(plist [1], 'fred\r\n') + + def test_shell(self): + """Check with and without shell works""" + oper = TestSubprocess.MyOperation() + cmd = 'echo test >/dev/stderr' + self.assertRaises(OSError, Popen, [cmd], shell=False) + plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(len(plist [0]), 0) + self.assertEqual(plist [1], 'test\r\n') + + def test_list_args(self): + """Check with and without shell works using list arguments""" + oper = TestSubprocess.MyOperation() + cmd = ['echo', 'test', '>/dev/stderr'] + plist = Popen(cmd, shell=False).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(plist [0], ' '.join(cmd[1:]) + '\r\n') + self.assertEqual(len(plist [1]), 0) + + oper = TestSubprocess.MyOperation() + + # this should be interpreted as 'echo' with the other args dropped + cmd = ['echo', 'test', '>/dev/stderr'] + plist = Popen(cmd, shell=True).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(plist [0], '\r\n') + + def test_cwd(self): + """Check we can change directory""" + for shell in (False, True): + oper = TestSubprocess.MyOperation() + plist = Popen('pwd', shell=shell, cwd='/tmp').CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(plist [0], '/tmp\r\n') + + def test_env(self): + """Check we can change environment""" + for add in (False, True): + oper = TestSubprocess.MyOperation() + env = os.environ + if add: + env ['FRED'] = 'fred' + cmd = 'echo $FRED' + plist = Popen(cmd, shell=True, env=env).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(plist [0], add and 'fred\r\n' or '\r\n') + + def test_extra_args(self): + """Check we can't add extra arguments""" + self.assertRaises(ValueError, Popen, 'true', close_fds=False) + + def test_basic_input(self): + """Check that incremental input works + + We set up a subprocess which will prompt for name. When we see this prompt + we send the name as input to the process. It should then print the name + properly to stdout. + """ + oper = TestSubprocess.MyOperation('Flash') + prompt = 'What is your name?: ' + cmd = 'echo -n "%s"; read name; echo Hello $name' % prompt + plist = Popen([cmd], stdin=oper.stdin_read_pipe, + shell=True).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(len(plist [1]), 0) + self.assertEqual(plist [0], prompt + 'Hello Flash\r\r\n') + + #TODO(sjg): Add test for passing PIPE in case underlying subprocess breaks. 
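To show how CommunicateFilter() is meant to be driven (illustrative only, not part of the patch; the command is arbitrary), a caller passes a function that receives each fragment of output as it arrives:

    import sys
    import cros_subprocess

    def show_progress(stream, data):
        # stream is sys.stdout or sys.stderr; data is the next fragment
        sys.stdout.write(data)
        sys.stdout.flush()

    # Merge stderr into stdout so the interleaving is exact
    proc = cros_subprocess.Popen(['make', '-s'],
                                 stderr=cros_subprocess.STDOUT)
    stdout, stderr, combined = proc.CommunicateFilter(show_progress)
    print 'returned', proc.returncode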
+ #TODO(sjg): Add test for passing a file handle also. + + def test_isatty(self): + """Check that ptys appear as terminals to the subprocess""" + oper = TestSubprocess.MyOperation() + cmd = ('if [ -t %d ]; then echo "terminal %d" >&%d; ' + 'else echo "not %d" >&%d; fi;') + both_cmds = '' + for fd in (1, 2): + both_cmds += cmd % (fd, fd, fd, fd, fd) + plist = Popen(both_cmds, shell=True).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(plist [0], 'terminal 1\r\n') + self.assertEqual(plist [1], 'terminal 2\r\n') + + # Now try with PIPE and make sure it is not a terminal + oper = TestSubprocess.MyOperation() + plist = Popen(both_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + shell=True).CommunicateFilter(oper.Output) + self._BasicCheck(plist, oper) + self.assertEqual(plist [0], 'not 1\n') + self.assertEqual(plist [1], 'not 2\n') + +if __name__ == '__main__': + unittest.main() diff --git a/tools/patman/gitutil.py b/tools/patman/gitutil.py index 72d37a0..5958439 100644 --- a/tools/patman/gitutil.py +++ b/tools/patman/gitutil.py @@ -23,11 +23,12 @@ import command import re import os import series -import settings import subprocess import sys import terminal
+import settings +
def CountCommitsToBranch(): """Returns number of commits between HEAD and the tracking branch. @@ -40,10 +41,123 @@ def CountCommitsToBranch(): """ pipe = [['git', 'log', '--no-color', '--oneline', '@{upstream}..'], ['wc', '-l']] - stdout = command.RunPipe(pipe, capture=True, oneline=True) + stdout = command.RunPipe(pipe, capture=True, oneline=True).stdout + patch_count = int(stdout) + return patch_count + +def GetUpstream(git_dir, branch): + """Returns the name of the upstream for a branch + + Args: + git_dir: Git directory containing repo + branch: Name of branch + + Returns: + Name of upstream branch (e.g. 'upstream/master') or None if none + """ + remote = command.OutputOneLine('git', '--git-dir', git_dir, 'config', + 'branch.%s.remote' % branch) + merge = command.OutputOneLine('git', '--git-dir', git_dir, 'config', + 'branch.%s.merge' % branch) + if remote == '.': + return merge + elif remote and merge: + leaf = merge.split('/')[-1] + return '%s/%s' % (remote, leaf) + else: + raise ValueError, ("Cannot determine upstream branch for branch " + "'%s' remote='%s', merge='%s'" % (branch, remote, merge)) + + +def GetRangeInBranch(git_dir, branch, include_upstream=False): + """Returns an expression for the commits in the given branch. + + Args: + git_dir: Directory containing git repo + branch: Name of branch + Return: + Expression in the form 'upstream..branch' which can be used to + access the commits. + """ + upstream = GetUpstream(git_dir, branch) + return '%s%s..%s' % (upstream, '~' if include_upstream else '', branch) + +def CountCommitsInBranch(git_dir, branch, include_upstream=False): + """Returns the number of commits in the given branch. + + Args: + git_dir: Directory containing git repo + branch: Name of branch + Return: + Number of patches that exist on top of the branch + """ + range_expr = GetRangeInBranch(git_dir, branch, include_upstream) + pipe = [['git', '--git-dir', git_dir, 'log', '--oneline', range_expr], + ['wc', '-l']] + result = command.RunPipe(pipe, capture=True, oneline=True) + patch_count = int(result.stdout) + return patch_count + +def CountCommits(commit_range): + """Returns the number of commits in the given range. + + Args: + commit_range: Range of commits to count (e.g. 'HEAD..base') + Return: + Number of patches that exist on top of the branch + """ + pipe = [['git', 'log', '--oneline', commit_range], + ['wc', '-l']] + stdout = command.RunPipe(pipe, capture=True, oneline=True).stdout patch_count = int(stdout) return patch_count
+def Checkout(commit_hash, git_dir=None, work_tree=None, force=False): + """Checkout the selected commit for this build + + Args: + commit_hash: Commit hash to check out + """ + pipe = ['git'] + if git_dir: + pipe.extend(['--git-dir', git_dir]) + if work_tree: + pipe.extend(['--work-tree', work_tree]) + pipe.append('checkout') + if force: + pipe.append('-f') + pipe.append(commit_hash) + result = command.RunPipe([pipe], capture=True, raise_on_error=False) + if result.return_code != 0: + raise OSError, 'git checkout (%s): %s' % (pipe, result.stderr) + +def Clone(git_dir, output_dir): + """Checkout the selected commit for this build + + Args: + commit_hash: Commit hash to check out + """ + pipe = ['git', 'clone', git_dir, '.'] + result = command.RunPipe([pipe], capture=True, cwd=output_dir) + if result.return_code != 0: + raise OSError, 'git clone: %s' % result.stderr + +def Fetch(git_dir=None, work_tree=None): + """Fetch from the origin repo + + Args: + commit_hash: Commit hash to check out + """ + pipe = ['git'] + if git_dir: + pipe.extend(['--git-dir', git_dir]) + if work_tree: + pipe.extend(['--work-tree', work_tree]) + pipe.append('fetch') + result = command.RunPipe([pipe], capture=True) + if result.return_code != 0: + raise OSError, 'git fetch: %s' % result.stderr + def CreatePatches(start, count, series): """Create a series of patches from the top of the current branch.
@@ -352,7 +466,8 @@ def GetAliasFile(): Returns: Filename of git alias file, or None if none """ - fname = command.OutputOneLine('git', 'config', 'sendemail.aliasesfile') + fname = command.OutputOneLine('git', 'config', 'sendemail.aliasesfile', + raise_on_error=False) if fname: fname = os.path.join(GetTopLevel(), fname.strip()) return fname @@ -384,6 +499,14 @@ def Setup(): if alias_fname: settings.ReadGitAliases(alias_fname)
+def GetHead(): + """Get the hash of the current HEAD + + Returns: + Hash of HEAD + """ + return command.OutputOneLine('git', 'show', '-s', '--pretty=format:%H') + if __name__ == "__main__": import doctest
diff --git a/tools/patman/patchstream.py b/tools/patman/patchstream.py index ad280cc..db2cc6c 100644 --- a/tools/patman/patchstream.py +++ b/tools/patman/patchstream.py @@ -237,7 +237,8 @@ class PatchStream: # Detect the start of a new commit elif commit_match: self.CloseCommit() - self.commit = commit.Commit(commit_match.group(1)[:7]) + # TODO: We should store the whole hash, and just display a subset + self.commit = commit.Commit(commit_match.group(1)[:8])
# Detect tags in the commit message elif tag_match: @@ -334,26 +335,47 @@ class PatchStream: self.Finalize()
-def GetMetaData(start, count): +def GetMetaDataForList(commit_range, git_dir=None, count=None, + series = Series()): """Reads out patch series metadata from the commits
This does a 'git log' on the relevant commits and pulls out the tags we are interested in.
Args: - start: Commit to start from: 0=HEAD, 1=next one, etc. - count: Number of commits to list + commit_range: Range of commits to count (e.g. 'HEAD..base') + git_dir: Path to git repositiory (None to use default) + count: Number of commits to list, or None for no limit + series: Series object to add information into. By default a new series + is started. + Returns: + A Series object containing information about the commits. """ - pipe = [['git', 'log', '--no-color', '--reverse', 'HEAD~%d' % start, - '-n%d' % count]] - stdout = command.RunPipe(pipe, capture=True) - series = Series() + params = ['git', 'log', '--no-color', '--reverse', commit_range] + if count is not None: + params[2:2] = ['-n%d' % count] + if git_dir: + params[1:1] = ['--git-dir', git_dir] + pipe = [params] + stdout = command.RunPipe(pipe, capture=True).stdout ps = PatchStream(series, is_log=True) for line in stdout.splitlines(): ps.ProcessLine(line) ps.Finalize() return series
+def GetMetaData(start, count): + """Reads out patch series metadata from the commits + + This does a 'git log' on the relevant commits and pulls out the tags we + are interested in. + + Args: + start: Commit to start from: 0=HEAD, 1=next one, etc. + count: Number of commits to list + """ + return GetMetaDataForList('HEAD~%d' % start, None, count) + def FixPatch(backup_dir, fname, series, commit): """Fix up a patch file, by adding/removing as required.
diff --git a/tools/patman/terminal.py b/tools/patman/terminal.py index 838c828..337a2a4 100644 --- a/tools/patman/terminal.py +++ b/tools/patman/terminal.py @@ -24,24 +24,32 @@ This module handles terminal interaction including ANSI color codes. """
+import os +import sys + +# Selection of when we want our output to be colored +COLOR_IF_TERMINAL, COLOR_ALWAYS, COLOR_NEVER = range(3) + class Color(object): """Conditionally wraps text in ANSI color escape sequences.""" BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) BOLD = -1 - COLOR_START = '\033[1;%dm' + BRIGHT_START = '\033[1;%dm' + NORMAL_START = '\033[22;%dm' BOLD_START = '\033[1m' RESET = '\033[0m'
- def __init__(self, enabled=True): + def __init__(self, colored=COLOR_IF_TERMINAL): """Create a new Color object, optionally disabling color output.
Args: enabled: True if color output should be enabled. If False then this class will not add color codes at all. """ - self._enabled = enabled + self._enabled = (colored == COLOR_ALWAYS or + (colored == COLOR_IF_TERMINAL and os.isatty(sys.stdout.fileno())))
- def Start(self, color): + def Start(self, color, bright=True): """Returns a start color code.
Args: @@ -52,7 +60,8 @@ class Color(object): otherwise returns empty string """ if self._enabled: - return self.COLOR_START % (color + 30) + base = self.BRIGHT_START if bright else self.NORMAL_START + return base % (color + 30) return ''
def Stop(self): @@ -63,10 +72,10 @@ class Color(object): returns empty string """ if self._enabled: - return self.RESET + return self.RESET return ''
- def Color(self, color, text): + def Color(self, color, text, bright=True): """Returns text with conditionally added color escape sequences.
Keyword arguments: @@ -78,9 +87,10 @@ class Color(object): returns text with color escape sequences based on the value of color. """ if not self._enabled: - return text + return text if color == self.BOLD: - start = self.BOLD_START + start = self.BOLD_START else: - start = self.COLOR_START % (color + 30) + base = self.BRIGHT_START if bright else self.NORMAL_START + start = base % (color + 30) return start + text + self.RESET
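A small example of the reworked Color class (illustrative only): colour is now decided by how the object is constructed rather than by a plain boolean, and brightness can be turned off per call:

    import terminal

    col = terminal.Color(terminal.COLOR_IF_TERMINAL)
    # Bright red when stdout is a terminal, plain text when piped
    print col.Color(col.RED, 'error: something went wrong')
    # Dimmer variant, using the new NORMAL_START sequence
    print col.Color(col.GREEN, 'ok', bright=False)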

This tool handles building U-Boot to check that you have not broken it with your patch series. It can build each individual commit and report which boards fail on which commits, and which errors come up. It aims to make full use of multi-processor machines.
Buildman is not yet ready for prime time. I am posting it now to obtain feedback on its operation and bugs, and to hopefully attract patches for these. It does some incorrect things, crashes, hangs, and uses lots of disk space.
Documentation and caveats are in tools/buildman/README.
Signed-off-by: Simon Glass sjg@chromium.org --- tools/buildman/.gitignore | 1 + tools/buildman/README | 454 ++++++++++++++++++++++ tools/buildman/board.py | 159 ++++++++ tools/buildman/bsettings.py | 59 +++ tools/buildman/builder.py | 893 +++++++++++++++++++++++++++++++++++++++++++ tools/buildman/buildman | 1 + tools/buildman/buildman.py | 116 ++++++ tools/buildman/control.py | 164 ++++++++ tools/buildman/test.py | 194 ++++++++++ tools/buildman/toolchain.py | 156 ++++++++ 10 files changed, 2197 insertions(+), 0 deletions(-) create mode 100644 tools/buildman/.gitignore create mode 100644 tools/buildman/README create mode 100644 tools/buildman/board.py create mode 100644 tools/buildman/bsettings.py create mode 100644 tools/buildman/builder.py create mode 120000 tools/buildman/buildman create mode 100755 tools/buildman/buildman.py create mode 100644 tools/buildman/control.py create mode 100644 tools/buildman/test.py create mode 100644 tools/buildman/toolchain.py
diff --git a/tools/buildman/.gitignore b/tools/buildman/.gitignore new file mode 100644 index 0000000..0d20b64 --- /dev/null +++ b/tools/buildman/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/tools/buildman/README b/tools/buildman/README new file mode 100644 index 0000000..7820011 --- /dev/null +++ b/tools/buildman/README @@ -0,0 +1,454 @@ +# Copyright (c) 2012 The Chromium OS Authors. +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +What is this? +============= + +This tool handles building U-Boot to check that you have not broken it +with your patch series. It can build each individual commit and report +which boards fail on which commits, and which errors come up. It aims +to make full use of multi-processor machines. + + +Caveats +======= + +Buildman is not yet ready for prime time. I am posting it now to obtain +feedback as to its operation and bugs, and to hopefully attract patches for +these. It does some incorrect things, crashes, hangs, and use lots of disk +space. + +In particular Ctrl-C often does not stop buildman. Press Ctrl-Z, then use +'kill %1' or similar to kill it. + +The source code needs tidying up. It will change somewhat before its final +form. Even the coding style is somewhat inconsistent. The builder used to +work a different way, and I haven't yet decided whether to support both +options or rip out the old code. Also the code shared with patman needs to +be split out. Please be understanding! + +If you still want to try it, and promise to send a small targeted patch +along with your complaint, please read on. + + +Theory of Operation +=================== + +(please read this section in full twice or you will be perpetually confused) + +Buildman is a builder. It is not make, although it runs make. It does not +produce any useful output on the terminal while building, except for +progress information. All the output (errors, warnings and binaries if you +are lucky) is stored in output directories, which you can look at while +the buildis progressing, or when it is finished. + +Buildman starts multiple threads, and each thread builds for one board at +a time. A thread starts at the first commit, configures the source for your +board and builds it. Then it checks out the next commit and does an +incremental build. Eventually the thread reaches the last commit and stops. +If errors or warnings are found along the way, the thread will reconfigure +after every commit, and your build will be very slow. + +Buildman works in an entirely separate place from your U-Boot repository. +It creates a separate working directory for each thread, and puts the +output files in the working directory, organised by commit name and board +name, in a two-level hierarchy. + +Buildman is invoked in your U-Boot directory, the one with the .git +directory. 
It clones this repository into a copy for each thread, and the +threads do not affect the state of your git repository. Any checkouts done +by the thread affect only the working directory for that thread. + +Buildman automatically selects the correct toolchain for each board. You +must supply suitable toolchains, but buildman looks after finding the right +one. + +Buildman always builds a branch, and always builds the upstream commit as +well, for comparison. It cannot build individual commits at present, unless +(maybe) you point it at an empty branch. Put all your commits in a branch, +set the branch's upstream to a valid value, and all will be well. Otherwise +buildman will perform random actions. Use -n to check what the random +actions might be. + +Buildman is optimised for building many commits at once, for many boards. +On multi-core machines, Buildman is fast because it uses most of the +available CPU power. When it gets to the end, or if you are building just +a few commits or boards, it will be pretty slow. + +Buildman lets you build all boards, or a subset. Specify the subset using +the board name, architecture name, SOC name, or anything else in the +boards.cfg file. So 'at91' will build all AT91 boards (arm), powerpc will +build all PowerPC boards. + +Buildman does not store intermediate object files. It copies the binary +output into a directory when a build is successful. It needs a fair bit +of disk space. + + +Setting up +========== + +1. Get the U-Boot source. You probably already have it, but if not these +steps should get you started with a repo and some commits for testing. + +$ cd /path/to/u-boot +$ git clone git://git.denx.de/u-boot.git . +$ git checkout -b my-branch origin/master +$ # Add some commits to the branch, reading for testing + +2. Create ~/.buildman to tell buildman where to find tool chains. As an +example: + +# Buildman settings file + +[toolchain] +root: / +rest: /toolchains/* +eldk: /opt/eldk-4.2 + +[toolchain-alias] +x86: i386 + + +This selects the available toolchain paths. Add the base directory for +each of your toolchains here. Buildman will search inside these directories +and also in any '/usr' and '/usr/bin' subdirectories. + +Make sure the tags (here root: rest: and eldk:) are unique. + +The toolchain-alias section indicates that the i386 toolchain should be used +to build x86 commits. + + +2. Check the available toolchains + +Run this check to make sure that you have a toolchain for every architecture. + +$ ./tools/buildman/buildman --list-tool-chains +Scanning for tool chains + - scanning path '/' + - looking in '/.' + - looking in '/bin' + - looking in '/usr/bin' + - found '/usr/bin/gcc' +Tool chain test: OK + - found '/usr/bin/c89-gcc' +Tool chain test: OK + - found '/usr/bin/c99-gcc' +Tool chain test: OK + - found '/usr/bin/x86_64-linux-gnu-gcc' +Tool chain test: OK + - scanning path '/toolchains/powerpc-linux' + - looking in '/toolchains/powerpc-linux/.' + - looking in '/toolchains/powerpc-linux/bin' + - found '/toolchains/powerpc-linux/bin/powerpc-linux-gcc' +Tool chain test: OK + - looking in '/toolchains/powerpc-linux/usr/bin' + - scanning path '/toolchains/nds32le-linux-glibc-v1f' + - looking in '/toolchains/nds32le-linux-glibc-v1f/.' + - looking in '/toolchains/nds32le-linux-glibc-v1f/bin' + - found '/toolchains/nds32le-linux-glibc-v1f/bin/nds32le-linux-gcc' +Tool chain test: OK + - looking in '/toolchains/nds32le-linux-glibc-v1f/usr/bin' + - scanning path '/toolchains/nios2' + - looking in '/toolchains/nios2/.' 
+ - looking in '/toolchains/nios2/bin' + - found '/toolchains/nios2/bin/nios2-linux-gcc' +Tool chain test: OK + - found '/toolchains/nios2/bin/nios2-linux-uclibc-gcc' +Tool chain test: OK + - looking in '/toolchains/nios2/usr/bin' + - found '/toolchains/nios2/usr/bin/nios2-linux-gcc' +Tool chain test: OK + - found '/toolchains/nios2/usr/bin/nios2-linux-uclibc-gcc' +Tool chain test: OK + - scanning path '/toolchains/microblaze-unknown-linux-gnu' + - looking in '/toolchains/microblaze-unknown-linux-gnu/.' + - looking in '/toolchains/microblaze-unknown-linux-gnu/bin' + - found '/toolchains/microblaze-unknown-linux-gnu/bin/microblaze-unknown-linux-gnu-gcc' +Tool chain test: OK + - found '/toolchains/microblaze-unknown-linux-gnu/bin/mb-linux-gcc' +Tool chain test: OK + - looking in '/toolchains/microblaze-unknown-linux-gnu/usr/bin' + - scanning path '/toolchains/mips-linux' + - looking in '/toolchains/mips-linux/.' + - looking in '/toolchains/mips-linux/bin' + - found '/toolchains/mips-linux/bin/mips-linux-gcc' +Tool chain test: OK + - looking in '/toolchains/mips-linux/usr/bin' + - scanning path '/toolchains/old' + - looking in '/toolchains/old/.' + - looking in '/toolchains/old/bin' + - looking in '/toolchains/old/usr/bin' + - scanning path '/toolchains/i386-linux' + - looking in '/toolchains/i386-linux/.' + - looking in '/toolchains/i386-linux/bin' + - found '/toolchains/i386-linux/bin/i386-linux-gcc' +Tool chain test: OK + - looking in '/toolchains/i386-linux/usr/bin' + - scanning path '/toolchains/bfin-uclinux' + - looking in '/toolchains/bfin-uclinux/.' + - looking in '/toolchains/bfin-uclinux/bin' + - found '/toolchains/bfin-uclinux/bin/bfin-uclinux-gcc' +Tool chain test: OK + - looking in '/toolchains/bfin-uclinux/usr/bin' + - scanning path '/toolchains/sparc-elf' + - looking in '/toolchains/sparc-elf/.' + - looking in '/toolchains/sparc-elf/bin' + - found '/toolchains/sparc-elf/bin/sparc-elf-gcc' +Tool chain test: OK + - looking in '/toolchains/sparc-elf/usr/bin' + - scanning path '/toolchains/arm-2010q1' + - looking in '/toolchains/arm-2010q1/.' + - looking in '/toolchains/arm-2010q1/bin' + - found '/toolchains/arm-2010q1/bin/arm-none-linux-gnueabi-gcc' +Tool chain test: OK + - looking in '/toolchains/arm-2010q1/usr/bin' + - scanning path '/toolchains/from' + - looking in '/toolchains/from/.' + - looking in '/toolchains/from/bin' + - looking in '/toolchains/from/usr/bin' + - scanning path '/toolchains/sh4-gentoo-linux-gnu' + - looking in '/toolchains/sh4-gentoo-linux-gnu/.' + - looking in '/toolchains/sh4-gentoo-linux-gnu/bin' + - found '/toolchains/sh4-gentoo-linux-gnu/bin/sh4-gentoo-linux-gnu-gcc' +Tool chain test: OK + - looking in '/toolchains/sh4-gentoo-linux-gnu/usr/bin' + - scanning path '/toolchains/avr32-linux' + - looking in '/toolchains/avr32-linux/.' + - looking in '/toolchains/avr32-linux/bin' + - found '/toolchains/avr32-linux/bin/avr32-gcc' +Tool chain test: OK + - looking in '/toolchains/avr32-linux/usr/bin' + - scanning path '/toolchains/m68k-linux' + - looking in '/toolchains/m68k-linux/.' 
+ - looking in '/toolchains/m68k-linux/bin' + - found '/toolchains/m68k-linux/bin/m68k-linux-gcc' +Tool chain test: OK + - looking in '/toolchains/m68k-linux/usr/bin' +List of available toolchains (17): +arm : /toolchains/arm-2010q1/bin/arm-none-linux-gnueabi-gcc +avr32 : /toolchains/avr32-linux/bin/avr32-gcc +bfin : /toolchains/bfin-uclinux/bin/bfin-uclinux-gcc +c89 : /usr/bin/c89-gcc +c99 : /usr/bin/c99-gcc +i386 : /toolchains/i386-linux/bin/i386-linux-gcc +m68k : /toolchains/m68k-linux/bin/m68k-linux-gcc +mb : /toolchains/microblaze-unknown-linux-gnu/bin/mb-linux-gcc +microblaze: /toolchains/microblaze-unknown-linux-gnu/bin/microblaze-unknown-linux-gnu-gcc +mips : /toolchains/mips-linux/bin/mips-linux-gcc +nds32le : /toolchains/nds32le-linux-glibc-v1f/bin/nds32le-linux-gcc +nios2 : /toolchains/nios2/bin/nios2-linux-gcc +powerpc : /toolchains/powerpc-linux/bin/powerpc-linux-gcc +sandbox : /usr/bin/gcc +sh4 : /toolchains/sh4-gentoo-linux-gnu/bin/sh4-gentoo-linux-gnu-gcc +sparc : /toolchains/sparc-elf/bin/sparc-elf-gcc +x86_64 : /usr/bin/x86_64-linux-gnu-gcc + + +You can see that everything is covered, even some strange ones that won't +be used (c88 and c99). This is a feature. + + +How to run it +============= + +First do a dry run: (replace <branch> with a real, local branch) + +$ ./tools/buildman/buildman -b <branch> -n + +If it can't detect the upstream branch, try checking out the branch, and +doing something like 'git branch --set-upstream <branch> upstream/master' +or something similar. + +As an exmmple: + +Dry run, so not doing much. But I would do this: + +Building 18 commits for 1059 boards (4 threads, 1 job per thread) +Build directory: ../lcd9b + 5bb3505 Merge branch 'master' of git://git.denx.de/u-boot-arm + c18f1b4 tegra: Use const for pinmux_config_pingroup/table() + 2f043ae tegra: Add display support to funcmux + e349900 tegra: fdt: Add pwm binding and node + 424a5f0 tegra: fdt: Add LCD definitions for Tegra + 0636ccf tegra: Add support for PWM + a994fe7 tegra: Add SOC support for display/lcd + fcd7350 tegra: Add LCD driver + 4d46e9d tegra: Add LCD support to Nvidia boards + 991bd48 arm: Add control over cachability of memory regions + 54e8019 lcd: Add CONFIG_LCD_ALIGNMENT to select frame buffer alignment + d92aff7 lcd: Add support for flushing LCD fb from dcache after update + dbd0677 tegra: Align LCD frame buffer to section boundary + 0cff9b8 tegra: Support control of cache settings for LCD + 9c56900 tegra: fdt: Add LCD definitions for Seaboard + 5cc29db lcd: Add CONFIG_CONSOLE_SCROLL_LINES option to speed console + cac5a23 tegra: Enable display/lcd support on Seaboard + 49ff541 wip + +Total boards to build for each commit: 1059 + +This shows that it will build all 1059 boards, using 4 threads (because +we have a 4-core CPU). Each thread will run with -j1, meaning that each +make job will use a single CPU. The list of commits to be built helps you +confirm that things look about right. Notice that buildman has chosen a +'base' directory for you, immediately above your source tree. + +Buildman works entirely inside the base directory, here ../lcd9b, +creating a working directory for each thread, and creating output +directories for each commit and board. + + +To run the build for real, take off the -n: + +$ ./tools/buildman/buildman -b <branch> + +Buildman will set up some working directories, and get starting. 
After a +minute or so it will settle down to a steady pace, with a display like this: + +Building 18 commits for 1059 boards (4 threads, 1 job per thread) + 528 36 124 /19062 1:13:30 : SIMPC8313_SP + +This means that it is building 19062 board/commit combinations. So far it +has managed to succesfully build 528. Another 36 have built with warnings, +and 124 more didn't build at all. Buildman expects to complete the process +in an hour an 15 minutes. Use this time to buy a faster computer. + + +To find out how the build went, ask for a summary. You can do this either +before the build completes (presumably in another terminal) or or afterwards. +Let's work through an example of how this is used: + +$ ./tools/buildman/buildman -b lcd9b -s +... +01: Merge branch 'master' of git://git.denx.de/u-boot-arm + powerpc: + galaxy5200_LOWBOOT +02: tegra: Use const for pinmux_config_pingroup/table() +03: tegra: Add display support to funcmux +04: tegra: fdt: Add pwm binding and node +05: tegra: fdt: Add LCD definitions for Tegra +06: tegra: Add support for PWM +07: tegra: Add SOC support for display/lcd +08: tegra: Add LCD driver +09: tegra: Add LCD support to Nvidia boards +10: arm: Add control over cachability of memory regions +11: lcd: Add CONFIG_LCD_ALIGNMENT to select frame buffer alignment +12: lcd: Add support for flushing LCD fb from dcache after update + arm: + lubbock +13: tegra: Align LCD frame buffer to section boundary +14: tegra: Support control of cache settings for LCD +15: tegra: fdt: Add LCD definitions for Seaboard +16: lcd: Add CONFIG_CONSOLE_SCROLL_LINES option to speed console +17: tegra: Enable display/lcd support on Seaboard +18: wip + +This shows which commits have succeeded and which have failed. In this case +the build is still in progress so many boards are not built yet (use -u to +see which ones). But still we can see a few failures. The galaxy5200_LOWBOOT +never builds correctly. This could be a problem with our toolchain, or it +could be a bug in the upstream. The good news is that we probably don't need +to blame our commits. The bad news is it isn't tested on that board. + +Commit 12 broke lubbock. That's what the '+ lubbock' means. The failure +is never fixed by a later commit, or you would see lubbock again, in green, +without the +. + +To see the actual error: + +$ ./tools/buildman/buildman -b <branch> -se lubbock +... +12: lcd: Add support for flushing LCD fb from dcache after update + arm: + lubbock ++common/libcommon.o: In function `lcd_sync': ++/u-boot/lcd9b/.bm-work/00/common/lcd.c:120: undefined reference to `flush_dcache_range' ++arm-none-linux-gnueabi-ld: BFD (Sourcery G++ Lite 2010q1-202) 2.19.51.20090709 assertion fail /scratch/julian/2010q1-release-linux-lite/obj/binutils-src-2010q1-202-arm-none-linux-gnueabi-i686-pc-linux-gnu/bfd/elf32-arm.c:12572 ++make: *** [/u-boot/lcd9b/.bm-work/00/build/u-boot] Error 139 +13: tegra: Align LCD frame buffer to section boundary +14: tegra: Support control of cache settings for LCD +15: tegra: fdt: Add LCD definitions for Seaboard +16: lcd: Add CONFIG_CONSOLE_SCROLL_LINES option to speed console +-/u-boot/lcd9b/.bm-work/00/common/lcd.c:120: undefined reference to `flush_dcache_range' ++/u-boot/lcd9b/.bm-work/00/common/lcd.c:125: undefined reference to `flush_dcache_range' +17: tegra: Enable display/lcd support on Seaboard +18: wip + +So the problem is in lcd.c, and affects those lubbock. This information +should be enough to work out what that commit is doing to break these +boards. 
+ +If you see error lines marked with - that means that the errors were fixed +by that commit. Sometimes commits can be in the wrong order, so that a +breakage is introduced for a few commits and fixed by later commits. This +shows up clearly with buildman. + +At commit 16, the error moves - you can see that the old error at line 120 +is fixed, but there is a new one at line 126. This is probably only because +we added some code and moved the broken line futher down the file. + +The full build output in this case is available in: + +../lcd9b/12_of_18_gd92aff7_lcd--Add-support-for/lubbock/ + + done: Indicates the build was done, and holds the return code from make. + This is 0 for a good build, typically 2 for a failure. + + err: Output from stderr, if any. Errors and warnings appear here. + + log: Output from stdout. Normally there isn't any since buildman runs + in silent mode for now. + + toolchain: Shows information about the toolchain used for the build. + +It is possible to get the build output there also. Use the -k option for +this. In that case you will also see some output files. + + System.map toolchain u-boot u-boot.bin u-boot.map + + + +Other options +============= + +Buildman has various other command line options. Try --help to see them. + + +TODO +==== + +This has mostly be written in my spare time as a response to my difficulties +in testing large series of patches. Apart from tidying up there is quite a +bit of scope for improvement. Things like tracking commits which increase +binary size, better error diffs, easier access to log files, error display +while building, saving all output files (e.g. SPL). + + +Credits +======= + +Thanks to Grant Grundler grundler@chromium.org for his ideas for improving +the build speed by building all commits for a board instead of the other +way around. + + + +Simon Glass +sjg@chromium.org +Halloween 2012 diff --git a/tools/buildman/board.py b/tools/buildman/board.py new file mode 100644 index 0000000..0854ae5 --- /dev/null +++ b/tools/buildman/board.py @@ -0,0 +1,159 @@ +# Copyright (c) 2012 The Chromium OS Authors. +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +class Board: + """A particular board that we can build""" + def __init__(self, target, arch, cpu, board_name, vendor, soc, options): + """Create a new board type. 
+ + Args: + + """ + self.target = target + self.arch = arch + self.cpu = cpu + self.board_name = board_name + self.vendor = vendor + self.soc = soc + self.props = [self.target, self.arch, self.cpu, self.board_name, + self.vendor, self.soc] + self.options = options + self.build_it = False + + +class Boards: + """Manage a list of boards.""" + def __init__(self): + # Use a simple list here, sinc OrderedDict requires Python 2.7 + self._boards = [] + + def AddBoard(self, board): + """Add a new board to the list. + + The board's target member must not already exist in the board list. + + Args: + board: board to add + """ + self._boards.append(board) + + def ReadBoards(self, fname): + """Read a list of boards from a board file. + + Args: + fname: Filename of boards.cfg file + """ + with open(fname, 'r') as fd: + for line in fd: + if line[0] == '#': + continue + fields = line.split() + if not fields: + continue + for upto in range(len(fields)): + if fields[upto] == '-': + fields[upto] = '' + while len(fields) < 7: + fields.append('') + + board = Board(*fields) + self.AddBoard(board) + + + def GetList(self): + """Return a list of available boards. + + Returns: + List of Board objects + """ + return self._boards + + def GetDict(self): + """Build a dictionary containing all the boards. + + Returns: + Dictionary: + key is board.target + value is board + """ + board_dict = {} + for board in self._boards: + board_dict[board.target] = board + return board_dict + + def GetSelectedDict(self): + """Return a dictionary containing the selected boards + + Returns: + List of Board objects that are marked selected + """ + board_dict = {} + for board in self._boards: + if board.build_it: + board_dict[board.target] = board + return board_dict + + def GetSelected(self): + """Return a list of selected boards + + Returns: + List of Board objects that are marked selected + """ + return [board for board in self._boards if board.build_it] + + def GetSelectedNames(self): + """Return a list of selected boards + + Returns: + List of board names that are marked selected + """ + return [board.target for board in self._boards if board.build_it] + + def SelectBoards(self, args): + """Mark boards selected based on args + + Args: + List of strings specifying boards to include, either named, or + by their target, architecture, cpu, vendor or soc. If empty, all + boards are selected. + + Returns: + Dictionary which holds the number of boards which were selected + due to each argument, arranged by argument. + """ + result = {} + for arg in args: + result[arg] = 0 + result['all'] = 0 + + for board in self._boards: + if args: + for arg in args: + if arg in board.props: + if not board.build_it: + board.build_it = True + result[arg] += 1 + result['all'] += 1 + else: + board.build_it = True + result['all'] += 1 + + return result diff --git a/tools/buildman/bsettings.py b/tools/buildman/bsettings.py new file mode 100644 index 0000000..c142eeb --- /dev/null +++ b/tools/buildman/bsettings.py @@ -0,0 +1,59 @@ +# Copyright (c) 2012 The Chromium OS Authors. +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +import ConfigParser +import os + + +def Setup(fname=''): + """Set up the settings module by reading config files + + Args: + config_fname: Config filename to read ('' for default) + """ + global settings + global config_fname + + settings = ConfigParser.SafeConfigParser() + config_fname = fname + if config_fname == '': + config_fname = '%s/.buildman' % os.getenv('HOME') + if config_fname: + settings.read(config_fname) + +def GetItems(section): + """Get the items from a section of the config. + + Args: + section: name of section to retrieve + + Returns: + List of (name, value) tuples for the section + """ + try: + return settings.items(section) + except ConfigParser.NoSectionError as e: + print e + print ("Warning: No toolc hains - please add a [toolchain] section " + "to your buildman config file %s" % config_fname) + return [] + except: + raise diff --git a/tools/buildman/builder.py b/tools/buildman/builder.py new file mode 100644 index 0000000..33efcd3 --- /dev/null +++ b/tools/buildman/builder.py @@ -0,0 +1,893 @@ +# Copyright (c) 2012 The Chromium OS Authors. +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +import collections +import errno +from datetime import datetime, timedelta +import glob +import os +import re +import Queue +import shutil +import string +import sys +import threading +import time + +import command +import gitutil +import terminal +import toolchain + + +""" +Theory of Operation + +Buildman works by keeping the machine as busy as possible, building different +commits for different boards on multiple CPUs at once. + +The source repo (self.git_dir) contains all the commits to be built. Each +thread works on a single board at a time. It checks out the first commit, +configures it for that board, then builds it. Then it checks out the next +commit and builds it (typically without re-configuring). When it runs out +of commits, it gets another job from the builder and starts again with that +board. + +Clearly the builder threads could work either way - they could check out a +commit and then built it for all boards. Using separate directories for each +commit/board pair they could leave their build product around afterwards +also. + +The intent behind building a single board for multiple commits, is to make +use of incremental builds. 
Since each commit is built incrementally from +the previous one, builds are faster. Reconfiguring for a different board +removes all intermediate object files. + +Many threads can be working at once, but each has its own working directory. +When a thread finishes a build, it puts the output files into a result +directory. + +The base directory used by buildman is normally '../<branch>', i.e. +a directory higher than the source repository and named after the branch +being build. + +Within the base directory, we have one subdirectory for each commit. Within +that is one subdirectory for each board. Within that is the build output for +that commit/board combination. + +Buildman also create working directories for each thread, in a .buildman/ +subdirectory in the base dir. + +As an example, say we are building branch 'us-net' for boards 'sandbox' and +'seaboard', and say that us-net has two commits. We will have directories +like this: + +us-net/ base directory + 01_of_02_g4ed4ebc_net--Add-tftp-speed-/ + sandbox/ + u-boot.bin + seaboard/ + u-boot.bin + 02_of_02_g4ed4ebc_net--Check-tftp-comp/ + sandbox/ + u-boot.bin + seaboard/ + u-boot.bin + .bm-work/ + 00/ working directory for thread 0 (contains source checkout) + build/ build output + 01/ working directory for thread 1 + build/ build output + ... +u-boot/ source directory + .git/ repository + + +TODO(sjg@chromium.org): Add option to retain all files (~5GB per commit) +""" + +# Possible build outcomes +OUTCOME_OK, OUTCOME_WARNING, OUTCOME_ERROR, OUTCOME_UNKNOWN = range(4) + +trans_valid_chars = string.maketrans("/: ", "---") + + +def Mkdir(dirname): + try: + os.mkdir(dirname) + except OSError as err: + if err.errno == errno.EEXIST: + pass + else: + raise + +class BuilderJob: + """Holds information about a job to be performed by a thread""" + def __init__(self): + self.board = None # Board to build (Board object) + # List of commits to build (Commit objects). If None, just build + # currently-checked-out commit. + self.commits = [] + + +class ResultThread(threading.Thread): + """This thread processes results from builder threads. + + It simply passes the results on to the builder. + """ + def __init__(self, builder): + """Set up a new result thread + + Args: + builder: Builder which will be sent each result + """ + threading.Thread.__init__(self) + self.builder = builder + + def run(self): + """Called to start up the result thread. + + We collect the next result job and pass it on to the build. + """ + while True: + result = self.builder.out_queue.get() + self.builder.ProcessResult(result) + self.builder.out_queue.task_done() + + +class BuilderThread(threading.Thread): + """This thread builds U-Boot for a particular board. + + An input queue provides each new job. We run 'make' to build U-Boot + and then pass the results on to the output queue. + """ + def __init__(self, builder, thread_num): + """Set up a new builder thread. + + Args: + builder: The builder which provides the work for this thread. + We obtain some parameters directly from the builder. + """ + threading.Thread.__init__(self) + self.builder = builder + self.thread_num = thread_num + + def Make(self, commit, brd, stage, cwd, *args, **kwargs): + """Run 'make' on a particular commit and board. + + The source code will already be checked out, so the 'commit' + argument is only for information. + + Args: + commit: Commit object that is being built + brd: Board object that is being built + stage: Stage of the build. 
Valid stages are: + distclean - can be called to clean source + config - called to configure for a board + build - the main make invocation - it does the build + args: A list of arguments to pass to 'make' + kwargs: A list of keyword arguments to pass to command.RunPipe() + + Returns: + + """ + return self.builder.do_make(commit, brd, stage, cwd, *args, + **kwargs) + + def RunCommit(self, commit_upto, brd, work_dir, do_config, force_build): + """Build a particular commit. + + If the build is already done, and we are not forcing a build, we skip + the build and just return the previously-saved results. + + Args: + commit_upto: Commit number to build (0...n-1) + brd: Board object to build + work_dir: Directory to which the source will be checked out + do_config: True to run a make <board>_config on the source + force_build: Force a build even if one was previously done + """ + # Create a default object - it will be overwritte by the call to + # self.Make() below, in the even that we do a build. + result = command.CommandResult() + result.return_code = 0 + out_dir = os.path.join(work_dir, 'build') + + # Checkout the commit_count + if commit_upto is not None: + commit = self.builder.commits[commit_upto] + gitutil.Checkout(commit.hash, os.path.join(work_dir, '.git'), + work_dir, force=True) + else: + commit = self.builder.commit # Ick, fix this for BuildCommits() + + # Check if the job was already completed last time + done_file = self.builder.GetDoneFile(commit_upto, brd.target) + result.already_done = os.path.exists(done_file) + if result.already_done and not force_build: + with open(done_file, 'r') as fd: + result.return_code = int(fd.readline()) + err_file = self.builder.GetErrFile(commit_upto, brd.target) + if os.path.exists(err_file) and os.stat(err_file).st_size: + result.stderr = 'bad' + else: + if not self.toolchain: + try: + self.toolchain = self.builder.toolchains.Select(brd.arch) + except ValueError as err: + result.return_code = 10 + result.stdout = '' + result.stderr = str(err) + # TODO(sjg@chromium.org): This gets swallowed, but needs + # to be reported. + + if self.toolchain: + env = dict(os.environ) + env['CROSS_COMPILE'] = self.toolchain.cross + env['PATH'] += (':' + self.toolchain.path) + Mkdir(out_dir) + args = ['O=build', '-s'] + if self.builder.num_jobs is not None: + args.extend(['-j', str(self.builder.num_jobs)]) + config_args = ['%s_config' % brd.target] + config_out = '' + + if do_config: + result = self.Make(commit, brd, 'distclean', work_dir, + 'distclean', *args, env=env) + result = self.Make(commit, brd, 'config', work_dir, + *(args + config_args), env=env) + config_out = result.combined + if result.return_code == 0: + result = self.Make(commit, brd, 'build', work_dir, *args, + env=env) + result.stdout = config_out + result.stdout + else: + result.return_code = 1 + result.stderr = 'No tool chain for %s\n' % brd.arch + result.already_done = False + + result.toolchain = self.toolchain + result.brd = brd + result.commit_upto = commit_upto + result.out_dir = out_dir + return result + + def _WriteResult(self, result, keep_outputs): + """Write a built result to the output directory. 
+ + Args: + result: CommandResult object containing result to write + keep_outputs: True to store the output binaries, False + to delete them + """ + if result.return_code < 0: # Fatal error + return + + # Write the output and stderr + if not result.already_done: + output_dir = self.builder._GetOutputDir(result.commit_upto) + Mkdir(output_dir) + build_dir = self.builder.GetBuildDir(result.commit_upto, + result.brd.target) + Mkdir(build_dir) + + outfile = os.path.join(build_dir, 'log') + with open(outfile, 'w') as fd: + if result.stdout: + fd.write(result.stdout) + + errfile = self.builder.GetErrFile(result.commit_upto, + result.brd.target) + if result.stderr: + with open(errfile, 'w') as fd: + fd.write(result.stderr) + elif os.path.exists(errfile): + os.remove(errfile) + + if result.toolchain: + done_file = self.builder.GetDoneFile(result.commit_upto, + result.brd.target) + with open(done_file, 'w') as fd: + fd.write('%s' % result.return_code) + with open(os.path.join(build_dir, 'toolchain'), 'w') as fd: + print >>fd, 'gcc', result.toolchain.gcc + print >>fd, 'path', result.toolchain.path + print >>fd, 'cross', result.toolchain.cross + print >>fd, 'arch', result.toolchain.arch + fd.write('%s' % result.return_code) + + # Now write the actual build output + if keep_outputs: + patterns = ['u-boot', '*.bin', 'u-boot.dtb', '*.map'] + for pattern in patterns: + file_list = glob.glob(os.path.join(result.out_dir, pattern)) + for fname in file_list: + shutil.copy(fname, build_dir) + + def RunJob(self, job): + brd = job.board + work_dir = self.builder.GetThreadDir(self.thread_num) + self.toolchain = None + if job.commits: + # Run 'make board_config' on the first commit + do_config = True + commit_upto = 0 + force_build = False + while commit_upto < len(job.commits): + result = self.RunCommit(commit_upto, brd, work_dir, do_config, + force_build or self.builder.force_build) + + # If we built that commit, then config is done. But if we got + # an warning, reconfig next time to force it to build the same + # files that created warnings this time. Otherwise an + # incremental build may not build the same file, and we will + # think that the warning has gone away. + # We could avoid this by using -Werror everywhere... + # For errors, the problem doesn't happen, since presumably + # the build stopped and didn't generate output, so will retry + # that file next time. So we could detect warnings and deal + # with them specially here. For now, we just reconfigure if + # anything goes work. + # Of course this is substantially slower if there are build + # errors/warnings (e.g. 2-3x slower even if only 10% of builds + # have problems). + failed = result.return_code or result.stderr + if failed and not result.already_done and not do_config: + # If the last build failed, try again with a reconfigure + # Sometimes if the board_config.h file changes it can mess + # with dependencies, and we get: + # make: *** No rule to make target `include/autoconf.mk', + # needed by `depend'. 
+ do_config = True + force_build = True + else: + force_build = False + do_config = result.already_done + if self.builder.force_config_on_failure: + if failed: + do_config = True + result.commit_upto = commit_upto + if result.return_code < 0: + raise ValueError('Interrupt') + self._WriteResult(result, job.keep_outputs) + self.builder.out_queue.put(result) + commit_upto += 1 + else: + result = self.RunCommit(None, True) + result.commit_upto = self.builder.upto + self.builder.out_queue.put(result) + + def run(self): + alive = True + while True: + job = self.builder.queue.get() + try: + if self.builder.active and alive: + self.RunJob(job) + except Exception as err: + alive = False + print err + self.builder.queue.task_done() + + +class Builder: + """Class for building U-Boot for a particular commit. + + Public members: + force_config_on_failure: If a commit fails for a board, disable + incremental building for the next commit we build for that + board, so that we will see all warnings/errors again. + Private members: + _timestamps: List of timestamps for the completion of the last + last _timestamp_count builds. Each is a datetime object. + _timestamp_count: Number of timestamps to keep in our list. + _build_period_us: Time taken for a single build (float object). + _complete_delay: Expected delay until completion (timedelta) + """ + def __init__(self, toolchains, base_dir, git_dir, num_threads, num_jobs, + checkout=True, show_unknown=True): + """Create a new Builder object + + Args: + toolchains: Toolchains object providing tool chains for the + builder + base_dir: Base directory to use for builder + git_dir: Git directory containing source repository + num_threads: Number of builder threads to run + num_jobs: Number of jobs to run at once (passed to make as -j) + checkout: True to check out source, False to skip that step. + This is used for testing. + """ + self.toolchains = toolchains + self.base_dir = base_dir + self._working_dir = os.path.join(base_dir, '.bm-work') + self.threads = [] + self.active = True + self.do_make = self.Make + self.checkout = checkout + self.num_threads = num_threads + self.num_jobs = num_jobs + self.already_done = 0 + self.force_build = False + self.git_dir = git_dir + self._show_unknown = show_unknown + self._timestamp_count = 10 + self._build_period_us = None + self._complete_delay = None + self._next_delay_update = datetime.now() + self.force_config_on_failure = True + + self.col = terminal.Color() + + self.queue = Queue.Queue() + self.out_queue = Queue.Queue() + for i in range(self.num_threads): + t = BuilderThread(self, i) + t.setDaemon(True) + t.start() + self.threads.append(t) + + self.last_line_len = 0 + t = ResultThread(self) + t.setDaemon(True) + t.start() + self.threads.append(t) + + ignore_lines = ['(make.*Waiting for unfinished)', '(Segmentation fault)'] + self.re_make_err = re.compile('|'.join(ignore_lines)) + + def __del__(self): + """Get rid of all threads created by the builder""" + for t in self.threads: + del t + + def _AddTimestamp(self): + """Add a new timestamp to the list and record the build period. + + The build period is the length of time taken to perform a single + build (one board, one commit). + """ + now = datetime.now() + self._timestamps.append(now) + count = len(self._timestamps) + delta = self._timestamps[-1] - self._timestamps[0] + seconds = delta.total_seconds() + + # If we have enough data, estimate build period (time taken for a + # single build) and completion time. 
+ if count > 1 and self._next_delay_update < now: + self._next_delay_update = now + timedelta(seconds=2) + if seconds > 0: + self._build_period = float(seconds) / count + todo = self.count - self.upto + self._complete_delay = timedelta(microseconds= + self._build_period * todo * 1000000) + # Round it + self._complete_delay -= timedelta( + microseconds=self._complete_delay.microseconds) + + if seconds > 60: + self._timestamps.popleft() + count -= 1 + + def ClearLine(self, length): + """Clear an characters on the current line + + Make way for a new line of length 'length', by outputting enough + spaces to clear out the old line. Then remember the new length for + next time. + + Args: + length: Length of new line, in characters + """ + if length < self.last_line_len: + print ' ' * (self.last_line_len - length), + print '\r', + self.last_line_len = length + sys.stdout.flush() + + def SelectCommit(self, commit, checkout=True): + """Checkout the selected commit for this build + """ + self.commit = commit + if checkout and self.checkout: + gitutil.Checkout(commit.hash) + + def Make(self, commit, brd, stage, cwd, *args, **kwargs): + cmd = ['make'] + list(args) + result = command.RunPipe([cmd], capture=True, capture_stderr=True, + cwd=cwd, raise_on_error=False, **kwargs) + return result + + def ProcessResult(self, result): + """Process the result of a build, showing progress information + """ + col = terminal.Color() + if result: + target = result.brd.target + + if result.return_code < 0: + self.active = False + command.StopAll() + return + + self.upto += 1 + if result.return_code != 0: + self.fail += 1 + elif result.stderr: + self.warned += 1 + if result.already_done: + self.already_done += 1 + else: + target = '(starting)' + + ok = self.upto - self.warned - self.fail + line = '\r' + self.col.Color(self.col.GREEN, '%5d' % ok) + line += self.col.Color(self.col.YELLOW, '%5d' % self.warned) + line += self.col.Color(self.col.RED, '%5d' % self.fail) + + name = ' /%-5d ' % self.count + + # Time estimate + self._AddTimestamp() + if self._complete_delay: + name += '%s : ' % self._complete_delay + # When building all boards for a commit, we can print a commit + # progress message. 
+ if result and result.commit_upto is None: + name += 'commit %2d/%-3d' % (self.commit_upto + 1, + self.commit_count) + + name += target + print line + name, + length = 13 + len(name) + self.ClearLine(length) + + def _GetOutputDir(self, commit_upto): + commit = self.commits[commit_upto] + subject = commit.subject.translate(trans_valid_chars) + commit_dir = ('%02d_of_%02d_g%s_%s' % (commit_upto + 1, + self.commit_count, commit.hash, subject[:20])) + output_dir = os.path.join(self.base_dir, commit_dir) + return output_dir + + def GetBuildDir(self, commit_upto, target): + output_dir = self._GetOutputDir(commit_upto) + return os.path.join(output_dir, target) + + def GetDoneFile(self, commit_upto, target): + return os.path.join(self.GetBuildDir(commit_upto, target), 'done') + + def GetErrFile(self, commit_upto, target): + output_dir = self.GetBuildDir(commit_upto, target) + return os.path.join(output_dir, 'err') + + """ + def SelectOutputDir(self, commit_upto): + subject = self.commit.subject.translate(trans_valid_chars) + self.output_dir = os.path.join(self.base_dir, '%02d_of_%02d_g%s_%s' % + (self.commit_upto + 1, self.commit_count, self.commit.hash, + subject[:20])) + """ + def FilterErrors(self, lines): + out_lines = [] + for line in lines: + if not self.re_make_err.search(line): + out_lines.append(line) + return out_lines + + def GetBuildOutcome(self, commit_upto, target): + done_file = self.GetDoneFile(commit_upto, target) + if os.path.exists(done_file): + with open(done_file, 'r') as fd: + return_code = int(fd.readline()) + err_lines = [] + err_file = self.GetErrFile(commit_upto, target) + if os.path.exists(err_file): + with open(err_file, 'r') as fd: + err_lines = self.FilterErrors(fd.readlines()) + + if return_code: + rc = OUTCOME_ERROR + elif len(err_lines): + rc = OUTCOME_WARNING + else: + rc = OUTCOME_OK + return rc, err_lines + return OUTCOME_UNKNOWN, [] + + def GetResultSummary(self, boards_selected, commit_upto): + """Calculate a summary of the results of building a commit. + + Returns: + Tuple: + Dict containing boards which passed building this commit. + keyed by board.target + List containing a summary of error/warning lines + """ + board_list = {} + err_lines_summary = [] + + for board in boards_selected.itervalues(): + outcome, err_lines = self.GetBuildOutcome(commit_upto, + board.target) + board_list[board.target] = outcome + for err in err_lines: + if err and not err.rstrip() in err_lines_summary: + err_lines_summary.append(err.rstrip()) + return board_list, err_lines_summary + + def AddOutcome(self, board_dict, arch_list, changes, char, color): + """ + Args: + board_dict: + arch_list: + changes: + color: + """ + for target in changes: + if target in board_dict: + arch = board_dict[target].arch + else: + arch = 'unknown' + str = ' ' + self.col.Color(color, char + ' ' + target) + if not arch in arch_list: + arch_list[arch] = str + else: + arch_list[arch] += str + + def ResetResultSummary(self, board_selected): + """Reset the results summary ready for use. + + Set up the base board list to be all those selected, and set the + error lines to empty. + + Following this, calls to PrintResultSummary() will use this + information to work out what has changed. 
+ + Args: + board_selected: Dict containing boards to summarise, keyed by + board.target + """ + self._base_board_list = {} + for board in board_selected: + self._base_board_list[board] = 0 + self._base_err_lines = [] + + def PrintResultSummary(self, board_selected, board_list, err_lines): + """Compare results with the base results and display delta. + + Only boards mentioned in board_selected will be considered. + + Args: + board_selected: Dict containing boards to summarise, keyed by + board.target + board_list: Dict containing boards for which we built this + commit, keyed by board.target + err_lines: A list of errors for this commit + """ + better = [] # List of boards fixed since last commit + worse = [] # List of new broken boards since last commit + new = [] + unknown = [] + + for target in board_list: + if target not in board_selected: + continue + + # If the board was built last time + if target in self._base_board_list: + base_outcome = self._base_board_list[target] + outcome = board_list[target] + if outcome == OUTCOME_UNKNOWN: + unknown.append(target) + elif outcome < base_outcome: + better.append(target) + elif outcome > base_outcome: + worse.append(target) + else: + new.append(target) + + better_err = [] + worse_err = [] + for line in err_lines: + if line not in self._base_err_lines: + worse_err.append('+' + line) + for line in self._base_err_lines: + if line not in err_lines: + better_err.append('-' + line) + + # Display results by arch + if better or worse or unknown or new or worse_err or better_err: + arch_list = {} + self.AddOutcome(board_selected, arch_list, better, '', + self.col.GREEN) + self.AddOutcome(board_selected, arch_list, worse, '+', + self.col.RED) + self.AddOutcome(board_selected, arch_list, new, '*', self.col.BLUE) + if self._show_unknown: + self.AddOutcome(board_selected, arch_list, unknown, '?', + self.col.MAGENTA) + for arch, target_list in arch_list.iteritems(): + print '%10s: %s' % (arch, target_list) + if better_err: + print self.col.Color(self.col.GREEN, '\n'.join(better_err)) + if worse_err: + print self.col.Color(self.col.RED, '\n'.join(worse_err)) + self._base_board_list = board_list + self._base_err_lines = err_lines + + not_built = [] + for board in board_selected: + if not board in board_list: + not_built.append(board) + if not_built: + print "Boards not built (%d): %s" % (len(not_built), + ', '.join(not_built)) + + + def ShowSummary(self, commits, board_selected, show_errors): + """Build U-Boot for a given commit object + + Args: + commit: Commit objects to summarise + boards: List of Board objects to build + show_errors: Show errors that occured + """ + #board_selected = boards.GetSelected() + #board_selected_names = boards.GetSelectedNames() + self.commit_count = len(commits) + self.ResetResultSummary(board_selected) + self.commits = commits + + for commit_upto in range(self.commit_count): + board_list, err_lines = self.GetResultSummary(board_selected, + commit_upto) + msg = '%02d: %s' % (commit_upto + 1, commits[commit_upto].subject) + print self.col.Color(self.col.BLUE, msg) + self.PrintResultSummary(board_selected, board_list, + err_lines if show_errors else []) + + def SetupBuild(self, board_selected, commits): + """Set up ready to start a build. 
+ + Args: + board_selected: Selected boards to build + commits: Selected commits to build + """ + self.count = len(board_selected) * len(commits) + self.upto = self.warned = self.fail = 0 + self._timestamps = collections.deque() + + def BuildBoardsForCommit(self, board_selected, keep_outputs): + #col = terminal.Color() + #print '%s %s %s' % (self.col.Color(self.col.GREEN, 'Building'), + #self.col.Color(self.col.YELLOW, self.commit.hash, bright=False), + #self.commit.subject), + #self.ClearLine(0) + self.SetupBuild(board_selected) + self.count = len(board_selected) + for brd in board_selected.itervalues(): + job = BuilderJob() + job.board = brd + job.commits = None + job.keep_outputs = keep_outputs + self.queue.put(brd) + + self.queue.join() + self.out_queue.join() + print + self.ClearLine(0) + + def BuildCommits(self, commits, board_selected, show_errors, keep_outputs): + self.commit_count = len(commits) + + self.ResetResultSummary(board_selected) + for self.commit_upto in range(self.commit_count): + self.SelectCommit(commits[self.commit_upto]) + self.SelectOutputDir() + Mkdir(self.output_dir) + + self.BuildBoardsForCommit(board_selected, keep_outputs) + board_list, err_lines = self.GetResultSummary() + self.PrintResultSummary(board_selected, board_list, + err_lines if show_errors else []) + + if self.already_done: + print '%d builds already done' % self.already_done + + def GetThreadDir(self, thread_num): + """Get the directory path to the working dir for a thread. + + thread_num: Number of thread to check. + """ + return os.path.join(self._working_dir, '%02d' % thread_num) + + def _PrepareThread(self, thread_num): + """Prepare the working for a thread. + + Args: + thread_num: Thread number (0, 1, ...) + """ + thread_dir = self.GetThreadDir(thread_num) + Mkdir(thread_dir) + git_dir = os.path.join(thread_dir, '.git') + + # Clone the repo if it doesn't already exist + # TODO(sjg@chromium): Perhaps some git hackery to symlink instead, so + # we have a private index but uses the origin repo's contents? + src_dir = os.path.abspath(self.git_dir) + if os.path.exists(git_dir): + gitutil.Fetch(git_dir, thread_dir) + else: + print 'Cloning repo for thread %d' % thread_num + gitutil.Clone(src_dir, thread_dir) + + def _PrepareWorkingSpace(self, max_threads): + """Prepare the working directory for use. + + Args: + max_threads: Maximum number of threads we expect to need. + """ + Mkdir(self._working_dir) + for thread in range(max_threads): + self._PrepareThread(thread) + + def _PrepareOutputSpace(self): + """Get the output directories ready to receive files. + + We delete any output directories which look like ones we need to + create. Having left over directories is confusing when the user wants + to check the output manually. 
+ + Args: + commits: List of commits to be build + """ + dir_list = [] + for commit_upto in range(self.commit_count): + dir_list.append(self._GetOutputDir(commit_upto)) + + for dirname in glob.glob(os.path.join(self.base_dir, '*')): + if dirname not in dir_list: + shutil.rmtree(dirname) + + def BuildBoards(self, commits, board_selected, show_errors, keep_outputs): + self.commit_count = len(commits) + self.commits = commits + + self.ResetResultSummary(board_selected) + Mkdir(self.base_dir) + self._PrepareWorkingSpace(min(self.num_threads, len(board_selected))) + self._PrepareOutputSpace() + self.SetupBuild(board_selected, commits) + self.ProcessResult(None) + for brd in board_selected.itervalues(): + job = BuilderJob() + job.board = brd + job.commits = commits + job.keep_outputs = keep_outputs + self.queue.put(job) + + self.queue.join() + self.out_queue.join() + print + self.ClearLine(0) diff --git a/tools/buildman/buildman b/tools/buildman/buildman new file mode 120000 index 0000000..e4fba2d --- /dev/null +++ b/tools/buildman/buildman @@ -0,0 +1 @@ +buildman.py \ No newline at end of file diff --git a/tools/buildman/buildman.py b/tools/buildman/buildman.py new file mode 100755 index 0000000..34adc8b --- /dev/null +++ b/tools/buildman/buildman.py @@ -0,0 +1,116 @@ +#!/usr/bin/python +# +# Copyright (c) 2012 The Chromium OS Authors. +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +"""See README for more information""" + +import multiprocessing +from optparse import OptionParser +import os +import re +import sys +import unittest + +# Bring in the patman libraries +our_path = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(our_path, '../patman')) + +# Our modules +import board +import builder +import checkpatch +import command +import control +import doctest +import gitutil +import patchstream +import terminal +import toolchain + +def RunTests(): + import test + + sys.argv = [sys.argv[0]] + suite = unittest.TestLoader().loadTestsFromTestCase(test.TestBuild) + result = unittest.TestResult() + suite.run(result) + + # TODO: Surely we can just 'print' result? 
+ print result + for test, err in result.errors: + print err + for test, err in result.failures: + print err + + +parser = OptionParser() +parser.add_option('-b', '--branch', type='string', + help='Branch name to build') +parser.add_option('-c', '--count', dest='count', type='int', + default=-1, help='Run build on the top n commits') +parser.add_option('-e', '--show_errors', action='store_true', + default=False, help='Show errors and warnings') +parser.add_option('-f', '--force-build', dest='force_build', + action='store_true', default=False, + help='Force build of boards even if already built') +parser.add_option('-g', '--git', type='string', + help='Git repo containing branch to build', default='.') +parser.add_option('-H', '--full-help', action='store_true', dest='full_help', + default=False, help='Display the README file') +parser.add_option('-j', '--jobs', dest='jobs', type='int', + default=None, help='Number of jobs to run at once (passed to make)') +parser.add_option('-k', '--keep-outputs', action='store_true', + default=False, help='Keep all build output files (e.g. binaries)') +parser.add_option('--list-tool-chains', action='store_true', default=False, + help='List available tool chains') +parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run', + default=False, help="Do a try run (describe actions, but no nothing)") +parser.add_option('-Q', '--quick', action='store_true', + default=False, help='Do a rough build, with limited warning resolution') +parser.add_option('-s', '--summary', action='store_true', + default=False, help='Show a build summary') +parser.add_option('-t', '--test', action='store_true', dest='test', + default=False, help='run tests') +parser.add_option('-T', '--threads', type='int', + default=None, help='Number of builder threads to use') +parser.add_option('-u', '--show_unknown', action='store_true', + default=False, help='Show boards with unknown build result') + +parser.usage = """buildman -b <branch> [options] + +Build U-Boot for all commits in a branch. Use -n to do a dry run""" + +(options, args) = parser.parse_args() + +# Run our meagre tests +if options.test: + RunTests() +elif options.full_help: + pager = os.getenv('PAGER') + if not pager: + pager = 'more' + fname = os.path.join(os.path.dirname(sys.argv[0]), 'README') + command.Run(pager, fname) + +# Build selected commits for selected boards +else: + control.DoBuildman(options, args) diff --git a/tools/buildman/control.py b/tools/buildman/control.py new file mode 100644 index 0000000..fc4a96f --- /dev/null +++ b/tools/buildman/control.py @@ -0,0 +1,164 @@ +# +# Copyright (c) 2012 Google, Inc +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +import multiprocessing +import os +import sys + +import board +import bsettings +from builder import Builder +import gitutil +import patchstream +import terminal +import toolchain + +def GetPlural(count): + """Returns a plural 's' if count is not 1""" + return 's' if count != 1 else '' + +def GetActionSummary(is_summary, count, selected, options): + """Return a string summarising the intended action. + + Returns: + Summary string. + """ + str = '%s %d commit%s for %d boards' % ( + 'Built' if is_summary else 'Building', count, GetPlural(count), + len(selected)) + str += ' (%d thread%s, %d job%s per thread)' % (options.threads, + GetPlural(options.threads), options.jobs, GetPlural(options.jobs)) + return str + +def ShowActions(series, why_selected, boards_selected, builder, options): + """Display a list of actions that we would take, if not a dry run. + + Args: + series: Series object + why_selected: Dictionary where each key is a buildman argument + provided by the user, and the value is the boards brought + in by that argument. For example, 'arm' might bring in + 400 boards, so in this case the key would be 'arm' and + the value would be a list of board names. + boards_selected: List of selected boards + builder: The builder that will be used to build the commits + options: Command line options object + """ + col = terminal.Color() + print 'Dry run, so not doing much. But I would do this:' + print + print GetActionSummary(False, len(series.commits), boards_selected, + options) + print 'Build directory: %s' % builder.base_dir + for commit in series.commits: + print ' ', col.Color(col.YELLOW, commit.hash, bright=False), + print commit.subject + print + for arg in why_selected: + if arg != 'all': + print arg, ': %d boards' % why_selected[arg] + print ('Total boards to build for each commit: %d\n' % + why_selected['all']) + +def DoBuildman(options, args): + gitutil.Setup() + + bsettings.Setup() + options.git_dir = os.path.join(options.git, '.git') + + toolchains = toolchain.Toolchains() + toolchains.Scan(options.list_tool_chains) + if options.list_tool_chains: + toolchains.List() + print + return + + # Work out how many commits to build. We want to build everything on the + # branch. We also build the upstream commit as a control so we can see + # problems introduced by the first commit on the branch. + count = options.count + if count == -1: + count = gitutil.CountCommitsInBranch(options.git_dir, options.branch) + count += 1 # Build upstream commit also + + col = terminal.Color() + if not count: + str = ("No commits found to process in branch '%s': " + "set branch's upstream or use -c flag" % options.branch) + print col.Color(col.RED, str) + sys.exit(1) + + # Work out what subset of the boards we are building + boards = board.Boards() + boards.ReadBoards(os.path.join(options.git, 'boards.cfg')) + why_selected = boards.SelectBoards(args) + selected = boards.GetSelected() + if not len(selected): + print col.Color(col.RED, 'No matching boards found') + sys.exit(1) + + # Read the metadata from the commits. First look at the upstream commit, + # then the ones in the branch. We would like to do something like + # upstream/master~..branch but that isn't possible if upstream/master is + # a merge commit. It will list all the commits that form part of the + # merge. 
+ range_expr = gitutil.GetRangeInBranch(options.git_dir, options.branch) + upstream_commit = gitutil.GetUpstream(options.git_dir, options.branch) + series = patchstream.GetMetaDataForList(upstream_commit, options.git_dir, + 1) + series = patchstream.GetMetaDataForList(range_expr, options.git_dir, None, + series) + + if not options.threads: + options.threads = min(multiprocessing.cpu_count(), len(selected)) + if not options.jobs: + options.jobs = max(1, (multiprocessing.cpu_count() + + len(selected) - 1) / len(selected)) + + output_dir = os.path.join('..', options.branch) + builder = Builder(toolchains, output_dir, options.git_dir, + options.threads, options.jobs, checkout=True, + show_unknown=options.show_unknown) + builder.force_config_on_failure = not options.quick + + # For a dry run, just show our actions as a sanity check + if options.dry_run: + ShowActions(series, why_selected, selected, builder, options) + else: + builder.force_build = options.force_build + + board_selected = boards.GetSelectedDict() + + print GetActionSummary(options.summary, count, board_selected, options) + + if options.summary: + builder.ShowSummary(series.commits, board_selected, + options.show_errors) + else: + # If we stop at any point, be sure to leave HEAD where it was + old_head = gitutil.GetHead() + try: + builder.BuildBoards(series.commits, board_selected, + options.show_errors, options.keep_outputs) + except: + gitutil.Checkout(old_head) + raise diff --git a/tools/buildman/test.py b/tools/buildman/test.py new file mode 100644 index 0000000..d1565f5 --- /dev/null +++ b/tools/buildman/test.py @@ -0,0 +1,194 @@ +# +# Copyright (c) 2012 The Chromium OS Authors. +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +import os +import shutil +import sys +import tempfile +import time +import unittest + +# Bring in the patman libraries +our_path = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(our_path, '../patman')) + +import board +import bsettings +import builder +import control +import command +import commit +import toolchain + +errors = [ + '''main.c: In function 'main_loop': +main.c:260:6: warning: unused variable 'joe' [-Wunused-variable] +''', + '''main.c: In function 'main_loop': +main.c:295:2: error: 'fred' undeclared (first use in this function) +main.c:295:2: note: each undeclared identifier is reported only once for each function it appears in +make[1]: *** [main.o] Error 1 +make: *** [common/libcommon.o] Error 2 +Make failed +''', + '''main.c: In function 'main_loop': +main.c:280:6: warning: unused variable 'mary' [-Wunused-variable] +''', + '''powerpc-linux-ld: warning: dot moved backwards before `.bss' +powerpc-linux-ld: warning: dot moved backwards before `.bss' +powerpc-linux-ld: u-boot: section .text lma 0xfffc0000 overlaps previous sections +powerpc-linux-ld: u-boot: section .rodata lma 0xfffef3ec overlaps previous sections +powerpc-linux-ld: u-boot: section .reloc lma 0xffffa400 overlaps previous sections +powerpc-linux-ld: u-boot: section .data lma 0xffffcd38 overlaps previous sections +powerpc-linux-ld: u-boot: section .u_boot_cmd lma 0xffffeb40 overlaps previous sections +powerpc-linux-ld: u-boot: section .bootpg lma 0xfffff198 overlaps previous sections +''' +] + + +# hash, subject, return code, list of errors/warnings +commits = [ + ['1234', 'upstream/master, ok', 0, []], + ['5678', 'Second commit, a warning', 0, errors[0:1]], + ['9012', 'Third commit, error', 1, errors[0:2]], + ['3456', 'Fourth commit, warning', 0, [errors[0], errors[2]]], + ['7890', 'Fifth commit, link errors', 1, [errors[0], errors[3]]], + ['abcd', 'Sixth commit, fixes all errors', 0, []] +] + +boards = [ + ['board0', 'arm', 'armv7', 'ARM Board 1', 'Tester', '', ''], + ['board1', 'arm', 'armv7', 'ARM Board 2', 'Tester', '', ''], + ['board2', 'powerpc', 'powerpc', 'PowerPC board 1', 'Tester', '', ''], + ['board3', 'powerpc', 'mpc5xx', 'PowerPC board 2', 'Tester', '', ''], + ['board4', 'sandbox', 'sandbox', 'Sandbox board', 'Tester', '', ''] +] + +class Options: + """Class that holds build options""" + pass + +class TestBuild(unittest.TestCase): + """Test buildman + + TODO: Write tests for the rest of the functionality + """ + def setUp(self): + # Set up commits to build + self.commits = [] + sequence = 0 + for commit_info in commits: + comm = commit.Commit(commit_info[0]) + comm.subject = commit_info[1] + comm.return_code = commit_info[2] + comm.error_list = commit_info[3] + comm.sequence = sequence + sequence += 1 + self.commits.append(comm) + + # Set up boards to build + self.boards = board.Boards() + for brd in boards: + self.boards.AddBoard(board.Board(*brd)) + self.boards.SelectBoards([]) + + # Set up the toolchains + bsettings.Setup() + self.toolchains = toolchain.Toolchains() + self.toolchains.Add('arm-linux-gcc', test=False) + self.toolchains.Add('sparc-linux-gcc', test=False) + self.toolchains.Add('powerpc-linux-gcc', test=False) + self.toolchains.Add('gcc', test=False) + + def Make(self, commit, brd, stage, *args, **kwargs): + result = command.CommandResult() 
+ boardnum = int(brd.target[-1]) + result.return_code = 0 + result.stderr = '' + result.stdout = ('This is the test output for board %s, commit %s' % + (brd.target, commit.hash)) + if boardnum >= 1 and boardnum >= commit.sequence: + result.return_code = commit.return_code + result.stderr = ''.join(commit.error_list) + if stage == 'build': + target_dir = None + for arg in args: + if arg.startswith('O='): + target_dir = arg[2:] + + if not os.path.isdir(target_dir): + os.mkdir(target_dir) + #time.sleep(.2 + boardnum * .2) + return result + + def testBasic(self): + """Test basic builder operation""" + return + + base_dir = tempfile.mkdtemp() + if not os.path.isdir(base_dir): + os.mkdir(base_dir) + build = builder.Builder(self.toolchains, base_dir, num_jobs=2, + checkout=False) + build.do_make = self.Make + board_selected = self.boards.GetSelectedDict() + + build.BuildCommits(self.commits, board_selected, False) + print + #del build + #build = builder.Builder(self.toolchains, base_dir, num_jobs=2, + #checkout=False) + #build.do_make = self.Make + #build.BuildCommits(self.commits, self.boards) + + build.ShowSummary(self.commits, board_selected, True) + + #shutil.rmtree(base_dir) + #print base_dir + print '\r', + + def testGit(self): + """Test basic builder operation""" + base_dir = tempfile.mkdtemp() + if not os.path.isdir(base_dir): + os.mkdir(base_dir) + options = Options() + options.git = '/home/sjg/u' + options.summary = False + options.jobs = None + options.dry_run = False + #options.git = os.path.join(base_dir, 'repo') + options.branch = 'us-disk' + options.force_build = False + options.list_tool_chains = False + options.count = -1 + options.git_dir = None + options.threads = None + options.show_unknown = False + options.quick = False + options.show_errors = False + options.keep_outputs = False + args = ['tegra20'] + control.DoBuildman(options, args) + +if __name__ == "__main__": + unittest.main() diff --git a/tools/buildman/toolchain.py b/tools/buildman/toolchain.py new file mode 100644 index 0000000..374932b --- /dev/null +++ b/tools/buildman/toolchain.py @@ -0,0 +1,156 @@ +# Copyright (c) 2012 The Chromium OS Authors. +# +# See file CREDITS for list of people who contributed to this +# project. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +import glob +import os + +import bsettings +import command + +class Toolchain: + """A single toolchain""" + + def __init__(self, fname, test, verbose=False): + """Create a new toolchain object. 
+ + Args: + fname: Filename of the gcc component + test: True to run the toolchain to test it + """ + self.gcc = fname + self.path = os.path.dirname(fname) + self.cross = os.path.basename(fname)[:-3] + pos = self.cross.find('-') + self.arch = self.cross[:pos] if pos != -1 else 'sandbox' + + os.environ['CROSS_COMPILE'] = self.cross + cmd = [fname, '--version'] + if test: + result = command.RunPipe([cmd], capture=True) + self.ok = result.return_code == 0 + if verbose: + print 'Tool chain test: ', + if self.ok: + print 'OK' + else: + print 'BAD' + print 'Command: ', cmd + print result.stdout + print result.stderr + else: + self.ok = True + self.priority = self.GetPriority(fname) + + def GetPriority(self, fname): + """Return the priority of the toolchain. + + Args: + fname: Filename of toolchain + Returns: + Priority of toolchain, 0=highest, 20=lowest. + """ + priority_list = ['-elf', '-unknown-linux-gnu', '-linux', '-elf', + '-none-linux-gnueabi', '-uclinux', '-none-eabi', + '-gentoo-linux-gnu', '-linux-gnueabi', '-le-linux', '-uclinux'] + for prio in range(len(priority_list)): + if priority_list[prio] in fname: + return prio + return prio + +class Toolchains: + """Manage a list of toolchains for building U-Boot + + We select one toolchain for each architecture type + """ + + def __init__(self): + self.toolchains = {} + self.paths = [] + for name, value in bsettings.GetItems('toolchain'): + if '*' in value: + self.paths += glob.glob(value) + else: + self.paths.append(value) + + + def Add(self, fname, test=True, verbose=False): + """Add a toolchain to our list + + We select the given toolchain as our preferred one for its + architecture if it is a higher priority than the others. + + Args: + fname: Filename of toolchain's gcc driver + test: True to run the toolchain to test it + """ + toolchain = Toolchain(fname, test, verbose) + add_it = toolchain.ok + if toolchain.arch in self.toolchains: + add_it = (toolchain.priority < + self.toolchains[toolchain.arch].priority) + if add_it: + self.toolchains[toolchain.arch] = toolchain + + def Scan(self, verbose): + """Scan for available toolchains and select the best for each arch. + + We look for all the toolchains we can file, figure out the + architecture for each, and whether it works. Then we select the + highest priority toolchain for each arch. + + Args: + verbose: True to print out progress information + """ + if verbose: print 'Scanning for tool chains' + for path in self.paths: + if verbose: print " - scanning path '%s'" % path + for subdir in ['.', 'bin', 'usr/bin']: + dirname = os.path.join(path, subdir) + if verbose: print " - looking in '%s'" % dirname + for fname in glob.glob(dirname + '/*gcc'): + if verbose: print " - found '%s'" % fname + self.Add(fname, True, verbose) + + def List(self): + """List out the selected toolchains for each architecture""" + print 'List of available toolchains (%d):' % len(self.toolchains) + if len(self.toolchains): + for key, value in sorted(self.toolchains.iteritems()): + print '%-10s: %s' % (key, value.gcc) + else: + print 'None' + + def Select(self, arch): + """Returns the toolchain for a given architecture + + Args: + args: Name of architecture (e.g. 'arm', 'ppc_8xx') + + returns: + toolchain object, or None if none found + """ + for name, value in bsettings.GetItems('toolchain-alias'): + if arch == name: + arch = value + + if not arch in self.toolchains: + raise ValueError, ("No tool chain found for arch '%s'" % arch) + return self.toolchains[arch]
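A note on configuration: the Toolchains class above reads its search paths from the settings file that bsettings.Setup() loads (~/.buildman by default, parsed with ConfigParser), using the [toolchain] and optional [toolchain-alias] sections. A hypothetical example of the expected shape follows; every path and alias value in it is made up purely for illustration:

    # Hypothetical ~/.buildman - all paths and aliases below are illustrative only.
    # Each value in [toolchain] is a directory (globs allowed) searched for '*gcc'.
    [toolchain]
    root: /usr
    eldk: /opt/eldk-5.2/*/sysdirs/usr

    # Optional: map a board architecture name to the toolchain architecture to use.
    [toolchain-alias]
    x86: i386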

Dear Simon Glass,
In message 1351718752-6832-2-git-send-email-sjg@chromium.org you wrote:
This tool handles building U-Boot to check that you have not broken it with your patch series. It can build each individual commit and report which boards fail on which commits, and which errors come up. It aims to make full use of multi-processor machines.
Buildman is not yet ready for prime time. I am posting it now to obtain feedback as to its operation and bugs, and to hopefully attract patches for these. It does some incorrect things, crashes, hangs, and uses lots of disk space.
Can you please explain a bit if or how this is related to MAKEALL? At first (_very_ short) glance it appears to be a completely different tool - but then, do we need two separate tools for apparently pretty similar purposes?
Best regards,
Wolfgang Denk

On Wed, Oct 31, 2012 at 11:46:23PM +0100, Wolfgang Denk wrote:
Dear Simon Glass,
In message 1351718752-6832-2-git-send-email-sjg@chromium.org you wrote:
This tool handles building U-Boot to check that you have not broken it with your patch series. It can build each individual commit and report which boards fail on which commits, and which errors come up. It aims to make full use of multi-processor machines.
Buildman is not yet ready for prime time. I am posting it now to obtain feedback as to its operation and bugs, and to hopefully attract patches for these. It does some incorrect things, crashes, hangs, and uses lots of disk space.
Can you please explain a bit if or how this is related to MAKEALL? At first (_very_ short) glance it appears to be a completely different tool - but then, do we need two separate tools for apparently pretty similar purposes?
I'll let Simon explain what Buildman handles better/worse than MAKEALL but my 2 cents is that MAKEALL is like patchwork in that it's a tool we all use and many of us wish was better about X/Y/Z. Unlike patchwork, it's bothered various folks enough to get changes made (Joe and his bugfixes last night, I've pastebin'd my wrapper a number of times, Simon has this tool going).

Hi Wolfgang,
On Wed, Oct 31, 2012 at 3:51 PM, Tom Rini trini@ti.com wrote:
On Wed, Oct 31, 2012 at 11:46:23PM +0100, Wolfgang Denk wrote:
Dear Simon Glass,
In message 1351718752-6832-2-git-send-email-sjg@chromium.org you wrote:
This tool handles building U-Boot to check that you have not broken it with your patch series. It can build each individual commit and report which boards fail on which commits, and which errors come up. It aims to make full use of multi-processor machines.
Buildman is not yet ready for prime time. I am posting it now to obtain feedback as to its operation and bugs, and to hopefully attract patches for these. It does some incorrect things, crashes, hangs, and uses lots of disk space.
Can you please explain a bit if or how this is related to MAKEALL? At first (_very_ short) glance it appears to be a completely different tool - but then, do we need two separate tools for apparently pretty similar purposes?
I'll let Simon explain what Buildman handles better/worse than MAKEALL but my 2 cents is that MAKEALL is like patchwork in that it's a tool we all use and many of us wish was better about X/Y/Z. Unlike patchwork, it's bothered various folks enough to get changes made (Joe and his bugfixes last night, I've pastebin'd my wrapper a number of times, Simon has this tool going).
Yes...it's mostly for building a list of commits (e.g. an entire branch) and automatically tracking and showing what boards break between commits. It is optimised for this - e.g. it can build 22 commits for 1000 boards (22,000 builds) in about an hour on a fast machine, which is a few times faster than I have managed with MAKEALL. If run a second time it doesn't rebuild commits it has already done, which can save time. Also it handles the toolchains mostly automatically.
It is a completely different tool, yes, but I have found myself using it instead of what I previously used: MAKEALL plus Mike's 'buildall' wrapper. The main thing I like about it is that it quickly shows me which builds are broken by which commits, and what the errors were.
I have posted it since it's not a lot of use having it privately - it might be useful to others.
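For anyone wanting to experiment, the command line in this series would be something like './tools/buildman/buildman -b us-net -n tegra20' (a dry run), and the builder can also be driven from Python in the same way test.py's testGit() does. A minimal sketch, assuming it is saved and run from tools/buildman inside a U-Boot tree; the branch name 'us-net' and board argument 'tegra20' are purely illustrative:

    # Sketch only: drive the WIP control module directly, mirroring test.py.
    # All option values here are illustrative assumptions, not recommendations.
    import os
    import sys

    # Make the patman libraries importable, as buildman.py does
    our_path = os.path.dirname(os.path.realpath(__file__))
    sys.path.append(os.path.join(our_path, '../patman'))

    import control

    class Options:
        """Plain holder for the attributes DoBuildman() expects"""
        pass

    options = Options()
    options.git = '.'                  # repo containing the branch to build
    options.branch = 'us-net'          # branch name (illustrative)
    options.count = -1                 # -1: build every commit in the branch
    options.dry_run = True             # describe the build rather than doing it
    options.summary = False
    options.show_errors = True
    options.show_unknown = False
    options.keep_outputs = False
    options.force_build = False
    options.quick = False
    options.list_tool_chains = False
    options.threads = None             # let DoBuildman() pick from the CPU count
    options.jobs = None
    options.git_dir = None             # filled in by DoBuildman() from options.git

    control.DoBuildman(options, ['tegra20'])   # board selection arguments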
Regards, Simon

Dear Simon,
In message CAPnjgZ1qV=Odn93NMWEeAOzOOeBHyWwXB21s+n+cXXh0nan=eg@mail.gmail.com you wrote:
Yes...it's mostly for building a list of commits (e.g. an entire branch) and automatically tracking and showing what boards break between commits. It is optimised for this - e.g. it can build 22 commits for 1000 boards (22,000 builds) in about an hour on a fast machine, which is a few times faster than I have managed with MAKEALL.
Do you have an explanation why this is so? There must be some fundamental difference, and I would really like to understand that.
It is a completely different tool, yes, but I have found myself using it instead of what I previously used: MAKEALL plus Mike's 'buildall' wrapper. The main thing I like about it is that it quickly shows me which builds are broken by which commits, and what the errors were.
I have posted it since it's not a lot of use having it privately - it might be useful to others.
Please don't misunderstand me. I really appreciate your posting. And if this tool is really that fast, I want to have it, and the sooner the better. But then, we should dump MAKEALL, especially if it's so slow.
In the end, there should be just one such tool...
Best regards,
Wolfgang Denk

Hi Wolfgang,
On Wed, Oct 31, 2012 at 4:37 PM, Wolfgang Denk wd@denx.de wrote:
Dear Simon,
In message CAPnjgZ1qV=Odn93NMWEeAOzOOeBHyWwXB21s+n+cXXh0nan=eg@mail.gmail.com you wrote:
Yes...it's mostly for building a list of commits (e.g. an entire branch) and automatically tracking and showing what boards break between commits. It is optimised for this - e.g. it can build 22 commits for 1000 boards (22,000 builds) in about an hour on a fast machine, which is a few times faster than I have managed with MAKEALL.
Do you have an explanation why this is so? There must be some fundamental difference, and I would really like to understand that.
I think it is partly that it checks out each commit and builds it incrementally (without throwing away existing files). So checking out the next commit may not affect the board being built at all, in which case the build is very fast.
Also rather than running with 'make -j40' or whatever, it uses 'make -j1' but with 40 threads. This increases CPU utilisation quite substantially (almost 50%) - I think this was discussed some time ago, and was something that Mike's buildall script did. But I found that it had conflicts and was hard to use. For example, it seems to be unhappy when different boards were configured for different architectures (something to do with include path confusion which I hope is now fixed - partly the subject of my efb2172 commit). Also, it prints all the output in a bit of a mess, so it's hard to see what board it relates to, and you have to go back to the log files to see the details.
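To make the scheduling model concrete, a simplified sketch of the 'many threads, make -j1 each' idea follows. This is not the BuilderThread code from the patch: the board list and output paths are illustrative, and buildman actually gives each thread its own source checkout under .bm-work/ rather than sharing the current tree.

    # Simplified sketch of thread-per-board scheduling with 'make -j1' per build.
    # Illustrative only; see builder.py in the patch for the real implementation.
    import os
    import subprocess
    import threading
    import Queue                       # Python 2, as used by this series

    BOARDS = ['seaboard', 'sandbox']   # illustrative board list
    NUM_THREADS = 2

    def worker(thread_num, queue):
        while True:
            board = queue.get()
            out_dir = os.path.join('.bm-work', '%02d' % thread_num, 'build')
            if not os.path.isdir(out_dir):
                os.makedirs(out_dir)
            # One -j1 make per board: parallelism comes from the threads, not -j
            subprocess.call(['make', '-s', 'O=%s' % out_dir, '%s_config' % board])
            subprocess.call(['make', '-s', '-j1', 'O=%s' % out_dir])
            queue.task_done()

    queue = Queue.Queue()
    for i in range(NUM_THREADS):
        t = threading.Thread(target=worker, args=(i, queue))
        t.setDaemon(True)
        t.start()
    for brd in BOARDS:
        queue.put(brd)
    queue.join()                       # wait for all boards to finish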
It is a completely different tool, yes, but I have found myself using it instead of what I previously used: MAKEALL plus Mike's 'buildall' wrapper. The main thing I like about it is that it quickly shows me which builds are broken by which commits, and what the errors were.
I have posted it since it's not a lot of use having it privately - it might be useful to others.
Please don't misunderstand me. I really appreciate your posting. And if this tool is really that fast, I want to have it, and the sooner the better. But then, we should dump MAKEALL, especially if it's so slow.
In the end, there should be just one such tool...
Well I hope we can have a period of trying it out :-) Let's see what people think, and there is still work to do. It has become my tool of choice, but only in the last few months.
Regards, Simon
Best regards,
Wolfgang Denk
-- DENX Software Engineering GmbH, MD: Wolfgang Denk & Detlev Zundel HRB 165235 Munich, Office: Kirchenstr.5, D-82194 Groebenzell, Germany Phone: (+49)-8142-66989-10 Fax: (+49)-8142-66989-80 Email: wd@denx.de "If you can, help others. If you can't, at least don't hurt others."
- the Dalai Lama

On Wed, Oct 31, 2012 at 05:11:51PM -0700, Simon Glass wrote:
[snip]
Also rather than running with 'make -j40' or whatever, it uses 'make -j1' but with 40 threads. This increases CPU utilisation quite substantially (almost 50%) - I think this was discussed some time ago,
MAKEALL supports this, but doesn't default to it, today. I'm wondering if we ought to make it the default for all non-single board builds. The winning point is right around boards-to-build == `grep -c processor /proc/cpuinfo` and I think is right around setting NBUILDS to that value, both on consumer multicore and heavy-duty 32/64 core boxes.

On Wed, Oct 31, 2012 at 8:18 PM, Tom Rini trini@ti.com wrote:
On Wed, Oct 31, 2012 at 05:11:51PM -0700, Simon Glass wrote:
[snip]
Also rather than running with 'make -j40' or whatever, it uses 'make -j1' but with 40 threads. This increases CPU utilisation quite substantially (almost 50%) - I think this was discussed some time ago,
MAKEALL supports this, but doesn't default to it, today. I'm wondering if we ought to make it the default for all non-single board builds. The winning point is right around boards-to-build == `grep -c processor /proc/cpuinfo` and I think is right around setting NBUILDS to that value, both on consumer multicore and heavy-duty 32/64 core boxes.
Yeah, I usually set BUILD_NBUILDS to 24-50 on my 24-thread system, and then set BUILD_NCPUS to 1-4. It's nice to offer a little bit of parallelism on the individual builds, sometimes. I did a bunch of tests, but not in a scientific fashion. Enough to agree with you that maxing out builds seems to win for u-boot.
Andy

Hi Andy / Tom,
On Wed, Oct 31, 2012 at 9:17 PM, Andy Fleming afleming@gmail.com wrote:
On Wed, Oct 31, 2012 at 8:18 PM, Tom Rini trini@ti.com wrote:
On Wed, Oct 31, 2012 at 05:11:51PM -0700, Simon Glass wrote:
[snip]
Also rather than running with 'make -j40' or whatever, it uses 'make -j1' but with 40 threads. This increases CPU utilisation quite substantially (almost 50%) - I think this was discussed some time ago,
MAKEALL supports this, but doesn't default to it, today. I'm wondering if we ought to make it the default for all non-single board builds. The winning point is right around boards-to-build == `grep -c processor /proc/cpuinfo` and I think is right around setting NBUILDS to that value, both on consumer multicore and heavy-duty 32/64 core boxes.
Yeah, I usually set BUILD_NBUILDS to 24-50 on my 24-thread system, and then set BUILD_NCPUS to 1-4. It's nice to offer a little bit of parallelism on the individual builds, sometimes. I did a bunch of tests, but not in a scientific fashion. Enough to agree with you that maxing out builds seems to win for u-boot.
As a bit of an unscientific test, what sort of time does it take to build all 1000-or-so boards on your systems?
Andy
Regards, Simon

On 11/01/12 14:01, Simon Glass wrote:
Hi Andy / Tom,
On Wed, Oct 31, 2012 at 9:17 PM, Andy Fleming afleming@gmail.com wrote:
On Wed, Oct 31, 2012 at 8:18 PM, Tom Rini trini@ti.com wrote:
On Wed, Oct 31, 2012 at 05:11:51PM -0700, Simon Glass wrote:
[snip]
Also rather than running with 'make -j40' or whatever, it uses 'make -j1' but with 40 threads. This increases CPU utilisation quite substantially (almost 50%) - I think this was discussed some time ago,
MAKEALL supports this, but doesn't default to it, today. I'm wondering if we ought to make it the default for all non-single board builds. The winning point is right around boards-to-build == `grep -c processor /proc/cpuinfo` and I think is right around setting NBUILDS to that value, both on consumer multicore and heavy-duty 32/64 core boxes.
Yeah, I usually set BUILD_NBUILDS to 24-50 on my 24-thread system, and then set BUILD_NCPUS to 1-4. It's nice to offer a little bit of parallelism on the individual builds, sometimes. I did a bunch of tests, but not in a scientific fashion. Enough to agree with you that maxing out builds seems to win for u-boot.
As a bit of an unscientific test, what sort of time does it take to build all 1000-or-so boards on your systems?
My setup using MAKEALL does 921 boards (arm/powerpc/mips) with ELDK 5.2 in 54min wall-clock with 397% CPU util (on a 6 core machine).
-- Tom

Hi,
On Thu, Nov 1, 2012 at 4:29 PM, Tom Rini trini@ti.com wrote:
-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1
On 11/01/12 14:01, Simon Glass wrote:
Hi Andy / Tom,
On Wed, Oct 31, 2012 at 9:17 PM, Andy Fleming afleming@gmail.com wrote:
On Wed, Oct 31, 2012 at 8:18 PM, Tom Rini trini@ti.com wrote:
On Wed, Oct 31, 2012 at 05:11:51PM -0700, Simon Glass wrote:
[snip]
Also rather than running with 'make -j40' or whatever, it uses 'make -j1' but with 40 threads. This increases CPU utilisation quite substantially (almost 50%) - I think this was discussed some time ago,
MAKEALL supports this, but doesn't default to it, today. I'm wondering if we ought to make it the default for all non-single board builds. The winning point is right around boards-to-build == `grep -c processor /proc/cpuinfo` and I think is right around setting NBUILDS to that value, both on consumer multicore and heavy-duty 32/64 core boxes.
Yeah, I usually set BUILD_NBUILDS to 24-50 on my 24-thread system, and then set BUILD_NCPUS to 1-4. It's nice to offer a little bit of parallelism on the individual builds, sometimes. I did a bunch of tests, but not in a scientific fashion. Enough to agree with you that maxing out builds seems to win for u-boot.
As a bit of an unscientific test, what sort of time does it take to build all 1000-or-so boards on your systems?
My setup using MAKEALL does 921 boards (arm/powerpc/mips) with ELDK 5.2 in 54min wall-clock with 397% CPU util (on a 6 core machine).
buildman is best at building a series of commits at once. I suspect with just one commit its performance would be similar to MAKEALL. I use it to verify a series so that I know it will bisect correctly.
Has anyone given it a try? I have found one bug where it gets confused when a builder thread moves from one architecture to another. Apart from that, I am thinking of cleaning up the series and submitting some proper patches. Any comments?
Regards, Simon

Dear Simon Glass,
In message 1351718752-6832-1-git-send-email-sjg@chromium.org you wrote:
These changes are required to the patman libraries. This is not a proper patch yet, just something to try out.
...are required. So. And why exactly? Or what is the purpose of these changes?
Best regards,
Wolfgang Denk

Hi Wolfgang,
On Wed, Oct 31, 2012 at 3:42 PM, Wolfgang Denk wd@denx.de wrote:
Dear Simon Glass,
In message 1351718752-6832-1-git-send-email-sjg@chromium.org you wrote:
These changes are required to the patman libraries. This is not a proper patch yet, just something to try out.
...are required. So. And why exactly? Or what is the purpose of these changes?
Just so that people can try the builder if they want to. The patches enhance functions in patman, mostly on the git side, so that the builder can do its job. For example, it needs to clone a repo, check out code into a different directory, and work with branches a bit more.
This is not a useful patch for any other purpose (e.g. review) - it is just a lump of code. If there is interest in this it will need to be turned into proper patches.
Regards, Simon
Best regards,
Wolfgang Denk

Dear Simon,
In message CAPnjgZ3us4yoeqOHxozQ6VPHXhaKJdLWfb0HPWHWvnnUaZ0Ghg@mail.gmail.com you wrote:
These changes are required to the patman libraries. This is not a proper patch yet, just something to try out.
...are required. So. And why exactly? Or what is the purpose of these changes?
Just so that people can try the builder if they want to. The patches enhance functions in patman, mostly on the git side, so that the builder can do its job. For example, it needs to clone a repo, check out code into a different directory, and work with branches a bit more.
This is not a useful patch for any other purpose (e.g. review) - it is just a lump of code. If there is interest in this it will need to be turned into proper patches.
Hm... I apologize, but I'm just an old man, and a bit slow of wits these days. I cannot review any such code without knowing what it is supposed to achieve. Maybe you should add such explanations to the commit message, even if it's only a WIP patch? I would definitely appreciate this (as it would help me to understand what this is all about).
Best regards,
Wolfgang Denk

Hi Wolfgang,
On Wed, Oct 31, 2012 at 4:40 PM, Wolfgang Denk wd@denx.de wrote:
Dear Simon,
In message CAPnjgZ3us4yoeqOHxozQ6VPHXhaKJdLWfb0HPWHWvnnUaZ0Ghg@mail.gmail.com you wrote:
These changes are required to the patman libraries. This is not a proper patch yet, just something to try out.
...are required. So. And why exactly? Or what is the purpose of these changes?
Just so that people can try the builder if they want to. The patches enhance functions in patman, mostly on the git side, so that the builder can do its job. For example, it needs to clone a repo, check out code into a different directory, and work with branches a bit more.
This is not a useful patch for any other purpose (e.g. review) - it is just a lump of code. If there is interest in this it will need to be turned into proper patches.
Hm... I apologize, but I'm just an old man, and a bit slow of wits these days. I cannot review any such code without knowing what it is supposed to achieve. Maybe you should add such explanations to the commit message, even if it's only a WIP patch? I would definitely appreciate this (as it would help me to understand what this is all about).
:-) OK I see. I will add a proper commit message so it is properly described, but please do understand it is WIP so far.
Regards, Simon
Best regards,
Wolfgang Denk

Hi Wolfgang,
On Wed, Oct 31, 2012 at 5:04 PM, Simon Glass sjg@chromium.org wrote:
Hi Wolfgang,
On Wed, Oct 31, 2012 at 4:40 PM, Wolfgang Denk wd@denx.de wrote:
Dear Simon,
In message CAPnjgZ3us4yoeqOHxozQ6VPHXhaKJdLWfb0HPWHWvnnUaZ0Ghg@mail.gmail.com you wrote:
These changes are required to the patman libraries. This is not a proper patch yet, just something to try out.
...are required. So. And why exactly? Or what is the purpose of these changes?
Just so that people can try the builder if they want to. The patches enhance functions in patman, mostly on the git side, so that the builder can do its job. For example, it needs to clone a repo, check out code into a different directory, and work with branches a bit more.
This is not a useful patch for any other purpose (e.g. review) - it is just a lump of code. If there is interest in this it will need to be turned into proper patches.
Hm... I apologize, but I'm just an old man, and a bit slow of wits these days. I cannot review any such code without knowing what it is supposed to achieve. Maybe you should add such explanations to the commit message, even if it's only a WIP patch? I would definitely appreciate this (as it would help me to understand what this is all about).
Here are the notes for the changes. When I split this into commits I will put these comments with each commit.
checkpatch.py:
Remove two commented-out lines which are no longer needed.
command.py:
Change these functions to return a CommandResult class instead of just a return code. This allows us to capture output, errors and return codes. Should we need to expand the functionality in the future, it will be fairly easy - just add new members to the class.
Also provide an option to raise an exception on error (non-zero return code), rather than always just returning the return code. This can make it easier to handle unforeseen errors.
Provide a back-door way of killing all tasks (although this needs further work).
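As a rough usage sketch (assuming the RunPipe() interface described above; the git command and error handling below are purely illustrative):

    import command

    # Capture output and get a CommandResult back instead of a bare return code
    result = command.RunPipe([['git', 'log', '--oneline', '-5']], capture=True)
    print result.stdout         # captured standard output
    print result.return_code    # 0 on success

    # With raise_on_error (the default), a non-zero return code raises instead
    try:
        command.RunPipe([['false']], capture=True, raise_on_error=True)
    except Exception, err:
        print 'command failed:', err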
cros_subprocess.py:
This is a slight enhancement of the built-in Python subprocess module. It permits access to command output while the command is still in progress. This is important if we want to filter the output for errors, etc., while still displaying it on the terminal. Since some tasks can take a minute to complete, it is not acceptable to show no output during this time.
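To illustrate the idea only (this is not the cros_subprocess implementation, just a sketch of progressive output reading with the standard library):

    import os, subprocess, sys

    # Run a long command with stdout and stderr merged into one pipe
    proc = subprocess.Popen(['make', '-s'], stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    collected = []
    while True:
        data = os.read(proc.stdout.fileno(), 1024)  # returns as soon as data arrives
        if not data:            # empty read means the pipe has closed
            break
        sys.stdout.write(data)  # echo progress to the terminal immediately
        collected.append(data)  # keep a copy so it can be filtered for errors later
    proc.wait()
    output = ''.join(collected)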
gitutil.py:
Several functions are enhanced so that you can specify a --git-dir with the command, and also a --work-tree. This allows us to work with git repositories outside the current directory. This is needed for buildman, since it has multiple threads working in their own place with their own checked-out commit.
New methods are added to clone a repo, fetch updates from a repo, checkout a commit, and obtain an expression for the list of commits in a branch.
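Roughly, the enhancements boil down to passing extra options through to git; something like the following (the helper names and exact arguments here are illustrative, not necessarily the real gitutil API):

    import command

    def count_commits(commit_range, git_dir=None):
        """Illustrative: count commits in a range, optionally in another repo."""
        cmd = ['git']
        if git_dir:
            cmd += ['--git-dir', git_dir]
        cmd += ['rev-list', '--count', commit_range]
        result = command.RunPipe([cmd], capture=True, oneline=True)
        return int(result.stdout)

    def checkout(commit_hash, git_dir, work_tree):
        """Illustrative: check a commit out into a separate work tree."""
        cmd = ['git', '--git-dir', git_dir, '--work-tree', work_tree,
               'checkout', '-f', commit_hash]
        command.RunPipe([cmd], capture=True)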
patchstream.py:
GetMetaDataForList() now supports --git-dir, and can also add to an existing Series that is passed in. This allows us to call it twice with different commit ranges and get a single, unified series. The old GetMetaData() function now calls this.
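A hedged sketch of how that might be used (the keyword arguments shown here are assumptions based on the description above, not a confirmed signature):

    import patchstream

    # Assumed calling convention: pass a commit range, an optional git
    # directory and an existing Series to extend; the combined Series is
    # returned.
    series = patchstream.GetMetaDataForList('upstream/master..branch1',
                                            git_dir='/path/to/repo/.git')
    series = patchstream.GetMetaDataForList('upstream/master..branch2',
                                            git_dir='/path/to/repo/.git',
                                            series=series)
    # 'series' now covers the commits from both ranges as one unified series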
terminal.py:
This now supports both bright and normal ANSI colours.
Colours are automatically suppressed if stdout is not a terminal. This avoids getting ANSI characters in piped output.
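The idea is roughly the following (a sketch only; the real terminal.py names may differ):

    import sys

    COLOUR_START = '\033[%d%sm'   # %d = ANSI colour code, %s = ';1' for bright
    COLOUR_RESET = '\033[0m'
    RED = 31

    def colour_text(colour, text, bright=True, fd=sys.stdout):
        """Illustrative: wrap text in ANSI codes, but only for real terminals."""
        if not fd.isatty():
            return text           # piped output stays free of ANSI sequences
        bright_str = ';1' if bright else ''
        return (COLOUR_START % (colour, bright_str)) + text + COLOUR_RESET

    print colour_text(RED, 'error: something went wrong')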
Regards, Simon
participants (4): Andy Fleming, Simon Glass, Tom Rini, Wolfgang Denk