Update bitbake from upstream.

Origin: https://github.com/openembedded/bitbake.git
Commit: f8126aaf774186a6eaf0bd4067b89c074594886c

Signed-off-by: Alexander Smirnov <asmirnov@ilbers.de>
Parent commit: a6e101f567
100 changed files with 8473 additions and 3939 deletions
  1. bitbake/LICENSE (+2, -0)
  2. bitbake/README (+35, -0)
  3. bitbake/bin/bitbake (+2, -2)
  4. bitbake/bin/bitbake-diffsigs (+133, -88)
  5. bitbake/bin/bitbake-dumpsig (+51, -22)
  6. bitbake/bin/bitbake-layers (+13, -32)
  7. bitbake/bin/bitbake-selftest (+1, -0)
  8. bitbake/bin/bitbake-worker (+69, -32)
  9. bitbake/bin/git-make-shallow (+165, -0)
  10. bitbake/bin/toaster (+79, -32)
  11. bitbake/conf/bitbake.conf (+0, -1)
  12. bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.xml (+1, -1)
  13. bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.xml (+23, -24)
  14. bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.xml (+40, -32)
  15. bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.xml (+36, -22)
  16. bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.xml (+359, -126)
  17. bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.xml (+15, -25)
  18. bitbake/doc/bitbake-user-manual/bitbake-user-manual.xml (+1, -1)
  19. bitbake/doc/bitbake.1 (+1, -1)
  20. bitbake/lib/bb/COW.py (+13, -13)
  21. bitbake/lib/bb/__init__.py (+1, -1)
  22. bitbake/lib/bb/build.py (+77, -26)
  23. bitbake/lib/bb/cache.py (+25, -22)
  24. bitbake/lib/bb/codeparser.py (+50, -16)
  25. bitbake/lib/bb/command.py (+356, -65)
  26. bitbake/lib/bb/cooker.py (+269, -302)
  27. bitbake/lib/bb/cookerdata.py (+77, -23)
  28. bitbake/lib/bb/daemonize.py (+32, -143)
  29. bitbake/lib/bb/data.py (+28, -75)
  30. bitbake/lib/bb/data_smart.py (+103, -34)
  31. bitbake/lib/bb/event.py (+131, -48)
  32. bitbake/lib/bb/fetch2/__init__.py (+219, -143)
  33. bitbake/lib/bb/fetch2/bzr.py (+4, -5)
  34. bitbake/lib/bb/fetch2/clearcase.py (+5, -6)
  35. bitbake/lib/bb/fetch2/cvs.py (+8, -8)
  36. bitbake/lib/bb/fetch2/git.py (+210, -39)
  37. bitbake/lib/bb/fetch2/gitannex.py (+20, -4)
  38. bitbake/lib/bb/fetch2/gitsm.py (+12, -8)
  39. bitbake/lib/bb/fetch2/hg.py (+9, -9)
  40. bitbake/lib/bb/fetch2/local.py (+4, -14)
  41. bitbake/lib/bb/fetch2/npm.py (+42, -28)
  42. bitbake/lib/bb/fetch2/osc.py (+7, -8)
  43. bitbake/lib/bb/fetch2/perforce.py (+10, -11)
  44. bitbake/lib/bb/fetch2/repo.py (+4, -4)
  45. bitbake/lib/bb/fetch2/s3.py (+98, -0)
  46. bitbake/lib/bb/fetch2/sftp.py (+2, -4)
  47. bitbake/lib/bb/fetch2/ssh.py (+2, -3)
  48. bitbake/lib/bb/fetch2/svn.py (+10, -11)
  49. bitbake/lib/bb/fetch2/wget.py (+57, -35)
  50. bitbake/lib/bb/main.py (+161, -172)
  51. bitbake/lib/bb/monitordisk.py (+16, -11)
  52. bitbake/lib/bb/msg.py (+22, -0)
  53. bitbake/lib/bb/parse/__init__.py (+5, -1)
  54. bitbake/lib/bb/parse/ast.py (+5, -74)
  55. bitbake/lib/bb/parse/parse_py/BBHandler.py (+13, -13)
  56. bitbake/lib/bb/parse/parse_py/ConfHandler.py (+20, -12)
  57. bitbake/lib/bb/persist_data.py (+3, -7)
  58. bitbake/lib/bb/process.py (+45, -38)
  59. bitbake/lib/bb/providers.py (+9, -10)
  60. bitbake/lib/bb/remotedata.py (+116, -0)
  61. bitbake/lib/bb/runqueue.py (+321, -150)
  62. bitbake/lib/bb/server/__init__.py (+0, -78)
  63. bitbake/lib/bb/server/process.py (+526, -172)
  64. bitbake/lib/bb/server/xmlrpc.py (+0, -422)
  65. bitbake/lib/bb/server/xmlrpcclient.py (+154, -0)
  66. bitbake/lib/bb/server/xmlrpcserver.py (+158, -0)
  67. bitbake/lib/bb/siggen.py (+173, -55)
  68. bitbake/lib/bb/taskdata.py (+16, -20)
  69. bitbake/lib/bb/tests/codeparser.py (+57, -9)
  70. bitbake/lib/bb/tests/data.py (+207, -46)
  71. bitbake/lib/bb/tests/event.py (+986, -0)
  72. bitbake/lib/bb/tests/fetch.py (+793, -175)
  73. bitbake/lib/bb/tests/parse.py (+36, -15)
  74. bitbake/lib/bb/tinfoil.py (+831, -78)
  75. bitbake/lib/bb/ui/buildinfohelper.py (+122, -54)
  76. bitbake/lib/bb/ui/knotty.py (+91, -81)
  77. bitbake/lib/bb/ui/ncurses.py (+2, -2)
  78. bitbake/lib/bb/ui/taskexp.py (+21, -27)
  79. bitbake/lib/bb/ui/toasterui.py (+11, -24)
  80. bitbake/lib/bb/ui/uihelper.py (+7, -1)
  81. bitbake/lib/bb/utils.py (+74, -41)
  82. bitbake/lib/bblayers/action.py (+47, -24)
  83. bitbake/lib/bblayers/common.py (+1, -1)
  84. bitbake/lib/bblayers/layerindex.py (+6, -5)
  85. bitbake/lib/bblayers/query.py (+35, -34)
  86. bitbake/lib/bs4/builder/_html5lib.py (+12, -5)
  87. bitbake/lib/prserv/serv.py (+56, -23)
  88. bitbake/lib/simplediff/LICENSE (+22, -0)
  89. bitbake/lib/simplediff/__init__.py (+198, -0)
  90. bitbake/lib/toaster/bldcollector/urls.py (+7, -5)
  91. bitbake/lib/toaster/bldcontrol/bbcontroller.py (+2, -2)
  92. bitbake/lib/toaster/bldcontrol/localhostbecontroller.py (+143, -93)
  93. bitbake/lib/toaster/bldcontrol/management/commands/checksettings.py (+7, -4)
  94. bitbake/lib/toaster/bldcontrol/management/commands/runbuilds.py (+25, -10)
  95. bitbake/lib/toaster/bldcontrol/tests.py (+0, -141)
  96. bitbake/lib/toaster/contrib/README (+0, -6)
  97. bitbake/lib/toaster/contrib/tts/README (+0, -41)
  98. bitbake/lib/toaster/contrib/tts/TODO (+0, -9)
  99. bitbake/lib/toaster/contrib/tts/config.py (+0, -98)
  100. bitbake/lib/toaster/contrib/tts/launcher.py (+0, -101)

+ 2 - 0
bitbake/LICENSE

@@ -15,3 +15,5 @@ Foundation and individual contributors.
 * QUnit is redistributed under the MIT license.
 
 * Font Awesome fonts redistributed under the SIL Open Font License 1.1
+
+* simplediff is distributed under the zlib license.

+ 35 - 0
bitbake/README

@@ -0,0 +1,35 @@
+Bitbake
+=======
+
+BitBake is a generic task execution engine that allows shell and Python tasks to be run
+efficiently and in parallel while working within complex inter-task dependency constraints.
+One of BitBake's main users, OpenEmbedded, takes this core and builds embedded Linux software
+stacks using a task-oriented approach.
+
+For information about BitBake, see the OpenEmbedded website:
+    http://www.openembedded.org/
+
+BitBake's plain-text documentation can be found in the doc directory, and its
+integrated HTML version at the Yocto Project website:
+    http://yoctoproject.org/documentation
+
+Contributing
+------------
+
+Please refer to
+http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
+for guidelines on how to submit patches; note that those guidelines are written for
+OpenEmbedded (and its core) rather than for BitBake patches, which go to
+bitbake-devel@lists.openembedded.org, but the same general rules apply. Once the
+commit(s) have been created, send the patch with git-send-email. For example, to
+send the last commit (HEAD) on the current branch, type:
+
+    git send-email -M -1 --to bitbake-devel@lists.openembedded.org
+
+Mailing list:
+
+    http://lists.openembedded.org/mailman/listinfo/bitbake-devel
+
+Source code:
+
+    http://git.openembedded.org/bitbake/

+ 2 - 2
bitbake/bin/bitbake

@@ -36,9 +36,9 @@ from bb import cookerdata
 from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException
 
 if sys.getfilesystemencoding() != "utf-8":
-    sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")
+    sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
 
-__version__ = "1.31.2"
+__version__ = "1.37.0"
 
 if __name__ == "__main__":
     if __version__ != bb.__version__:

+ 133 - 88
bitbake/bin/bitbake-diffsigs

@@ -3,7 +3,7 @@
 # bitbake-diffsigs
 # BitBake task signature data comparison utility
 #
-# Copyright (C) 2012-2013 Intel Corporation
+# Copyright (C) 2012-2013, 2017 Intel Corporation
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 as
@@ -22,7 +22,7 @@ import os
 import sys
 import warnings
 import fnmatch
-import optparse
+import argparse
 import logging
 import pickle
 
@@ -30,109 +30,154 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), '
 
 import bb.tinfoil
 import bb.siggen
+import bb.msg
+
+logger = bb.msg.logger_create('bitbake-diffsigs')
+
+def find_siginfo(tinfoil, pn, taskname, sigs=None):
+    result = None
+    tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
+                            'logging.LogRecord',
+                            'bb.command.CommandCompleted',
+                            'bb.command.CommandFailed'])
+    ret = tinfoil.run_command('findSigInfo', pn, taskname, sigs)
+    if ret:
+        while True:
+            event = tinfoil.wait_event(1)
+            if event:
+                if isinstance(event, bb.command.CommandCompleted):
+                    break
+                elif isinstance(event, bb.command.CommandFailed):
+                    logger.error(str(event))
+                    sys.exit(2)
+                elif isinstance(event, bb.event.FindSigInfoResult):
+                    result = event.result
+                elif isinstance(event, logging.LogRecord):
+                    logger.handle(event)
+    else:
+        logger.error('No result returned from findSigInfo command')
+        sys.exit(2)
+    return result
 
-def logger_create(name, output=sys.stderr):
-    logger = logging.getLogger(name)
-    console = logging.StreamHandler(output)
-    format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
-    if output.isatty():
-        format.enable_color()
-    console.setFormatter(format)
-    logger.addHandler(console)
-    logger.setLevel(logging.INFO)
-    return logger
-
-logger = logger_create('bitbake-diffsigs')
-
-def find_compare_task(bbhandler, pn, taskname):
+def find_compare_task(bbhandler, pn, taskname, sig1=None, sig2=None, color=False):
     """ Find the most recent signature files for the specified PN/task and compare them """
 
-    def get_hashval(siginfo):
-        if siginfo.endswith('.siginfo'):
-            return siginfo.rpartition(':')[2].partition('_')[0]
-        else:
-            return siginfo.rpartition('.')[2]
-
-    if not hasattr(bb.siggen, 'find_siginfo'):
-        logger.error('Metadata does not support finding signature data files')
-        sys.exit(1)
-
     if not taskname.startswith('do_'):
         taskname = 'do_%s' % taskname
 
-    filedates = bb.siggen.find_siginfo(pn, taskname, None, bbhandler.config_data)
-    latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-3:]
-    if not latestfiles:
-        logger.error('No sigdata files found matching %s %s' % (pn, taskname))
-        sys.exit(1)
-    elif len(latestfiles) < 2:
-        logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (pn, taskname))
-        sys.exit(1)
+    if sig1 and sig2:
+        sigfiles = find_siginfo(bbhandler, pn, taskname, [sig1, sig2])
+        if len(sigfiles) == 0:
+            logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
+            sys.exit(1)
+        elif not sig1 in sigfiles:
+            logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1))
+            sys.exit(1)
+        elif not sig2 in sigfiles:
+            logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
+            sys.exit(1)
+        latestfiles = [sigfiles[sig1], sigfiles[sig2]]
     else:
-        # It's possible that latestfiles contain 3 elements and the first two have the same hash value.
-        # In this case, we delete the second element.
-        # The above case is actually the most common one. Because we may have sigdata file and siginfo
-        # file having the same hash value. Comparing such two files makes no sense.
-        if len(latestfiles) == 3:
-            hash0 = get_hashval(latestfiles[0])
-            hash1 = get_hashval(latestfiles[1])
-            if hash0 == hash1:
-                latestfiles.pop(1)
-
-        # Define recursion callback
-        def recursecb(key, hash1, hash2):
-            hashes = [hash1, hash2]
-            hashfiles = bb.siggen.find_siginfo(key, None, hashes, bbhandler.config_data)
-
-            recout = []
-            if len(hashfiles) == 2:
-                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
-                recout.extend(list('  ' + l for l in out2))
-            else:
-                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
-
-            return recout
-
-        # Recurse into signature comparison
-        output = bb.siggen.compare_sigfiles(latestfiles[0], latestfiles[1], recursecb)
-        if output:
-            print('\n'.join(output))
+        filedates = find_siginfo(bbhandler, pn, taskname)
+        latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-3:]
+        if not latestfiles:
+            logger.error('No sigdata files found matching %s %s' % (pn, taskname))
+            sys.exit(1)
+        elif len(latestfiles) < 2:
+            logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (pn, taskname))
+            sys.exit(1)
+
+    # Define recursion callback
+    def recursecb(key, hash1, hash2):
+        hashes = [hash1, hash2]
+        hashfiles = find_siginfo(bbhandler, key, None, hashes)
+
+        recout = []
+        if len(hashfiles) == 0:
+            recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
+        elif not hash1 in hashfiles:
+            recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
+        elif not hash2 in hashfiles:
+            recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
+        else:
+            out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
+            for change in out2:
+                for line in change.splitlines():
+                    recout.append('  ' + line)
+
+        return recout
+
+    # Recurse into signature comparison
+    logger.debug("Signature file (previous): %s" % latestfiles[-2])
+    logger.debug("Signature file (latest): %s" % latestfiles[-1])
+    output = bb.siggen.compare_sigfiles(latestfiles[-2], latestfiles[-1], recursecb, color=color)
+    if output:
+        print('\n'.join(output))
     sys.exit(0)
 
 
 
-parser = optparse.OptionParser(
-    description = "Compares siginfo/sigdata files written out by BitBake",
-    usage = """
-  %prog -t recipename taskname
-  %prog sigdatafile1 sigdatafile2
-  %prog sigdatafile1""")
+parser = argparse.ArgumentParser(
+    description="Compares siginfo/sigdata files written out by BitBake")
 
-parser.add_option("-t", "--task",
-        help = "find the signature data files for last two runs of the specified task and compare them",
-        action="store", dest="taskargs", nargs=2, metavar='recipename taskname')
+parser.add_argument('-d', '--debug',
+                    help='Enable debug output',
+                    action='store_true')
 
-options, args = parser.parse_args(sys.argv)
+parser.add_argument('--color',
+        help='Colorize output (where %(metavar)s is %(choices)s)',
+        choices=['auto', 'always', 'never'], default='auto', metavar='color')
+
+parser.add_argument("-t", "--task",
+        help="find the signature data files for last two runs of the specified task and compare them",
+        action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
+
+parser.add_argument("-s", "--signature",
+        help="With -t/--task, specify the signatures to look for instead of taking the last two",
+        action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
+
+parser.add_argument("sigdatafile1",
+        help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
+        action="store", nargs='?')
+
+parser.add_argument("sigdatafile2",
+        help="Second signature file to compare",
+        action="store", nargs='?')
+
+
+options = parser.parse_args()
+
+if options.debug:
+    logger.setLevel(logging.DEBUG)
+
+color = (options.color == 'always' or (options.color == 'auto' and sys.stdout.isatty()))
 
 if options.taskargs:
     with bb.tinfoil.Tinfoil() as tinfoil:
         tinfoil.prepare(config_only=True)
-        find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1])
+        if options.sigargs:
+            find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1], color=color)
+        else:
+            find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1], color=color)
 else:
-    if len(args) == 1:
-        parser.print_help()
-    else:
-        try:
-            if len(args) == 2:
-                output = bb.siggen.dump_sigfile(sys.argv[1])
-            else:
-                output = bb.siggen.compare_sigfiles(sys.argv[1], sys.argv[2])
-        except IOError as e:
-            logger.error(str(e))
-            sys.exit(1)
-        except (pickle.UnpicklingError, EOFError):
-            logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
+    if options.sigargs:
+        logger.error('-s/--signature can only be used together with -t/--task')
+        sys.exit(1)
+    try:
+        if options.sigdatafile1 and options.sigdatafile2:
+            output = bb.siggen.compare_sigfiles(options.sigdatafile1, options.sigdatafile2, color=color)
+        elif options.sigdatafile1:
+            output = bb.siggen.dump_sigfile(options.sigdatafile1)
+        else:
+            logger.error('Must specify signature file(s) or -t/--task')
+            parser.print_help()
             sys.exit(1)
+    except IOError as e:
+        logger.error(str(e))
+        sys.exit(1)
+    except (pickle.UnpicklingError, EOFError):
+        logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
+        sys.exit(1)
 
-        if output:
-            print('\n'.join(output))
+    if output:
+        print('\n'.join(output))
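
The rewritten script no longer calls bb.siggen.find_siginfo() in-process; it asks the cooker for the files over the UI connection instead. A condensed sketch of that command/event round trip, assuming an initialized build directory ('busybox' and 'do_compile' are placeholder arguments):

    import bb.tinfoil
    import bb.command
    import bb.event

    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=True)
        tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
                                'logging.LogRecord',
                                'bb.command.CommandCompleted',
                                'bb.command.CommandFailed'])
        # Ask the server to locate sigdata/siginfo files for the task
        if tinfoil.run_command('findSigInfo', 'busybox', 'do_compile', None):
            while True:
                event = tinfoil.wait_event(1)
                if isinstance(event, bb.command.CommandCompleted):
                    break
                elif isinstance(event, bb.event.FindSigInfoResult):
                    # With sigs=None the result maps filenames to mtimes
                    print(sorted(event.result))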

+ 51 - 22
bitbake/bin/bitbake-dumpsig

@@ -27,39 +27,68 @@ import pickle
 
 sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
 
+import bb.tinfoil
 import bb.siggen
+import bb.msg
 
-def logger_create(name, output=sys.stderr):
-    logger = logging.getLogger(name)
-    console = logging.StreamHandler(output)
-    format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
-    if output.isatty():
-        format.enable_color()
-    console.setFormatter(format)
-    logger.addHandler(console)
-    logger.setLevel(logging.INFO)
-    return logger
+logger = bb.msg.logger_create('bitbake-dumpsig')
 
-logger = logger_create('bitbake-dumpsig')
+def find_siginfo_task(bbhandler, pn, taskname):
+    """ Find the most recent signature file for the specified PN/task """
+
+    if not hasattr(bb.siggen, 'find_siginfo'):
+        logger.error('Metadata does not support finding signature data files')
+        sys.exit(1)
+
+    if not taskname.startswith('do_'):
+        taskname = 'do_%s' % taskname
+
+    filedates = bb.siggen.find_siginfo(pn, taskname, None, bbhandler.config_data)
+    latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-1:]
+    if not latestfiles:
+        logger.error('No sigdata files found matching %s %s' % (pn, taskname))
+        sys.exit(1)
+
+    return latestfiles[0]
 
 parser = optparse.OptionParser(
     description = "Dumps siginfo/sigdata files written out by BitBake",
     usage = """
+  %prog -t recipename taskname
   %prog sigdatafile""")
 
+parser.add_option("-D", "--debug",
+        help = "enable debug",
+        action = "store_true", dest="debug", default = False)
+
+parser.add_option("-t", "--task",
+        help = "find the signature data file for the specified task",
+        action="store", dest="taskargs", nargs=2, metavar='recipename taskname')
+
 options, args = parser.parse_args(sys.argv)
 
-if len(args) == 1:
+if options.debug:
+    logger.setLevel(logging.DEBUG)
+
+if options.taskargs:
+    tinfoil = bb.tinfoil.Tinfoil()
+    tinfoil.prepare(config_only = True)
+    file = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])
+    logger.debug("Signature file: %s" % file)
+elif len(args) == 1:
     parser.print_help()
+    sys.exit(0)
 else:
-    try:
-        output = bb.siggen.dump_sigfile(args[1])
-    except IOError as e:
-        logger.error(str(e))
-        sys.exit(1)
-    except (pickle.UnpicklingError, EOFError):
-        logger.error('Invalid signature data - ensure you are specifying a sigdata/siginfo file')
-        sys.exit(1)
+    file = args[1]
+
+try:
+    output = bb.siggen.dump_sigfile(file)
+except IOError as e:
+    logger.error(str(e))
+    sys.exit(1)
+except (pickle.UnpicklingError, EOFError):
+    logger.error('Invalid signature data - ensure you are specifying a sigdata/siginfo file')
+    sys.exit(1)
 
-    if output:
-        print('\n'.join(output))
+if output:
+    print('\n'.join(output))
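
Either invocation path ends in bb.siggen.dump_sigfile(), which unpickles the file and returns printable lines. A minimal sketch (the path is hypothetical):

    import bb.siggen

    # dump_sigfile() returns a list of human-readable lines describing
    # the task's signature inputs (variable values, dependencies, hashes).
    for line in bb.siggen.dump_sigfile('/path/to/task.siginfo'):
        print(line)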

+ 13 - 32
bitbake/bin/bitbake-layers

@@ -24,49 +24,26 @@ import logging
 import os
 import sys
 import argparse
+import signal
 
 bindir = os.path.dirname(__file__)
 topdir = os.path.dirname(bindir)
 sys.path[0:0] = [os.path.join(topdir, 'lib')]
 
 import bb.tinfoil
+import bb.msg
 
-
-def tinfoil_init(parserecipes):
-    import bb.tinfoil
-    tinfoil = bb.tinfoil.Tinfoil(tracking=True)
-    tinfoil.prepare(not parserecipes)
-    tinfoil.logger.setLevel(logger.getEffectiveLevel())
-    return tinfoil
-
-
-def logger_create(name, output=sys.stderr):
-    logger = logging.getLogger(name)
-    loggerhandler = logging.StreamHandler(output)
-    loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
-    logger.addHandler(loggerhandler)
-    logger.setLevel(logging.INFO)
-    return logger
-
-def logger_setup_color(logger, color='auto'):
-    from bb.msg import BBLogFormatter
-    console = logging.StreamHandler(sys.stdout)
-    formatter = BBLogFormatter("%(levelname)s: %(message)s")
-    console.setFormatter(formatter)
-    logger.handlers = [console]
-    if color == 'always' or (color == 'auto' and console.stream.isatty()):
-        formatter.enable_color()
-
-
-logger = logger_create('bitbake-layers', sys.stdout)
+logger = bb.msg.logger_create('bitbake-layers', sys.stdout)
 
 def main():
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
     parser = argparse.ArgumentParser(
         description="BitBake layers utility",
         epilog="Use %(prog)s <subcommand> --help to get help on a specific command",
         add_help=False)
     parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
     parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+    parser.add_argument('-F', '--force', help='Force add without recipe parse verification', action='store_true')
     parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')
 
     global_args, unparsed_args = parser.parse_known_args()
@@ -83,13 +60,17 @@ def main():
     elif global_args.quiet:
         logger.setLevel(logging.ERROR)
 
-    logger_setup_color(logger, global_args.color)
+    # Need to re-run logger_create with color argument
+    # (will be the same logger since it has the same name)
+    bb.msg.logger_create('bitbake-layers', output=sys.stdout, color=global_args.color)
 
     plugins = []
-    tinfoil = tinfoil_init(False)
+    tinfoil = bb.tinfoil.Tinfoil(tracking=True)
+    tinfoil.logger.setLevel(logger.getEffectiveLevel())
     try:
+        tinfoil.prepare(True)
         for path in ([topdir] +
-                    tinfoil.config_data.getVar('BBPATH', True).split(':')):
+                    tinfoil.config_data.getVar('BBPATH').split(':')):
             pluginpath = os.path.join(path, 'lib', 'bblayers')
             bb.utils.load_plugins(logger, plugins, pluginpath)
 
@@ -109,7 +90,7 @@ def main():
 
         if getattr(args, 'parserecipes', False):
             tinfoil.config_data.disableTracking()
-            tinfoil.parseRecipes()
+            tinfoil.parse_recipes()
             tinfoil.config_data.enableTracking()
 
         return args.func(args)
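
The subcommands now come from plugin modules discovered along BBPATH. A minimal sketch of the discovery loop shown above, assuming an initialized build environment (the logger name is arbitrary):

    import os
    import bb.tinfoil
    import bb.utils
    import bb.msg

    logger = bb.msg.logger_create('bblayers-sketch')
    plugins = []
    with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
        tinfoil.prepare(True)  # parse configuration only
        # Look for lib/bblayers plugin modules in every BBPATH entry
        for path in tinfoil.config_data.getVar('BBPATH').split(':'):
            bb.utils.load_plugins(logger, plugins,
                                  os.path.join(path, 'lib', 'bblayers'))
    print([getattr(p, '__name__', str(p)) for p in plugins])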

+ 1 - 0
bitbake/bin/bitbake-selftest

@@ -28,6 +28,7 @@ except RuntimeError as exc:
 tests = ["bb.tests.codeparser",
          "bb.tests.cow",
          "bb.tests.data",
+         "bb.tests.event",
          "bb.tests.fetch",
          "bb.tests.parse",
          "bb.tests.utils"]

+ 69 - 32
bitbake/bin/bitbake-worker

@@ -11,10 +11,13 @@ import select
 import errno
 import signal
 import pickle
+import traceback
+import queue
 from multiprocessing import Lock
+from threading import Thread
 
 if sys.getfilesystemencoding() != "utf-8":
-    sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")
+    sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
 
 # Users shouldn't be running this code directly
 if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
@@ -63,7 +66,7 @@ if 0:
     consolelog.setFormatter(conlogformat)
     logger.addHandler(consolelog)
 
-worker_queue = b""
+worker_queue = queue.Queue()
 
 def worker_fire(event, d):
     data = b"<event>" + pickle.dumps(event) + b"</event>"
@@ -72,21 +75,39 @@ def worker_fire(event, d):
 def worker_fire_prepickled(event):
     global worker_queue
 
-    worker_queue = worker_queue + event
-    worker_flush()
+    worker_queue.put(event)
 
-def worker_flush():
-    global worker_queue, worker_pipe
+#
+# We can end up with write contention with the cooker, it can be trying to send commands
+# and we can be trying to send event data back. Therefore use a separate thread for writing 
+# back data to cooker.
+#
+worker_thread_exit = False
 
-    if not worker_queue:
-        return
+def worker_flush(worker_queue):
+    worker_queue_int = b""
+    global worker_pipe, worker_thread_exit
 
-    try:
-        written = os.write(worker_pipe, worker_queue)
-        worker_queue = worker_queue[written:]
-    except (IOError, OSError) as e:
-        if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
-            raise
+    while True:
+        try:
+            worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
+        except queue.Empty:
+            pass
+        while (worker_queue_int or not worker_queue.empty()):
+            try:
+                (_, ready, _) = select.select([], [worker_pipe], [], 1)
+                if not worker_queue.empty():
+                    worker_queue_int = worker_queue_int + worker_queue.get()
+                written = os.write(worker_pipe, worker_queue_int)
+                worker_queue_int = worker_queue_int[written:]
+            except (IOError, OSError) as e:
+                if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
+                    raise
+        if worker_thread_exit and worker_queue.empty() and not worker_queue_int:
+            return
+
+worker_thread = Thread(target=worker_flush, args=(worker_queue,))
+worker_thread.start()
 
 def worker_child_fire(event, d):
     global worker_pipe
@@ -115,7 +136,7 @@ def sigterm_handler(signum, frame):
     os.killpg(0, signal.SIGTERM)
     sys.exit()
 
-def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, appends, taskdepdata, quieterrors=False):
+def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, appends, taskdepdata, extraconfigdata, quieterrors=False, dry_run_exec=False):
     # We need to setup the environment BEFORE the fork, since
     # a fork() or exec*() activates PSEUDO...
 
@@ -131,8 +152,10 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
         except TypeError:
              umask = taskdep['umask'][taskname]
 
+    dry_run = cfg.dry_run or dry_run_exec
+
     # We can't use the fakeroot environment in a dry run as it possibly hasn't been built
-    if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not cfg.dry_run:
+    if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
         envvars = (workerdata["fakerootenv"][fn] or "").split()
         for key, value in (var.split('=') for var in envvars):
             envbackup[key] = os.environ.get(key)
@@ -199,16 +222,21 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
                 the_data = databuilder.mcdata[mc]
                 the_data.setVar("BB_WORKERCONTEXT", "1")
                 the_data.setVar("BB_TASKDEPDATA", taskdepdata)
+                if cfg.limited_deps:
+                    the_data.setVar("BB_LIMITEDDEPS", "1")
                 the_data.setVar("BUILDNAME", workerdata["buildname"])
                 the_data.setVar("DATE", workerdata["date"])
                 the_data.setVar("TIME", workerdata["time"])
+                for varname, value in extraconfigdata.items():
+                    the_data.setVar(varname, value)
+
                 bb.parse.siggen.set_taskdata(workerdata["sigdata"])
                 ret = 0
 
                 the_data = bb_cache.loadDataFull(fn, appends)
                 the_data.setVar('BB_TASKHASH', workerdata["runq_hash"][task])
 
-                bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN", True), taskname.replace("do_", "")))
+                bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN"), taskname.replace("do_", "")))
 
                 # exported_vars() returns a generator which *cannot* be passed to os.environ.update() 
                 # successfully. We also need to unset anything from the environment which shouldn't be there 
@@ -223,23 +251,23 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
                     the_data.setVar(e, fakeenv[e])
                     the_data.setVarFlag(e, 'export', "1")
 
-                task_exports = the_data.getVarFlag(taskname, 'exports', True)
+                task_exports = the_data.getVarFlag(taskname, 'exports')
                 if task_exports:
                     for e in task_exports.split():
                         the_data.setVarFlag(e, 'export', '1')
-                        v = the_data.getVar(e, True)
+                        v = the_data.getVar(e)
                         if v is not None:
                             os.environ[e] = v
 
                 if quieterrors:
                     the_data.setVarFlag(taskname, "quieterrors", "1")
 
-            except Exception as exc:
+            except Exception:
                 if not quieterrors:
-                    logger.critical(str(exc))
+                    logger.critical(traceback.format_exc())
                 os._exit(1)
             try:
-                if cfg.dry_run:
+                if dry_run:
                     return 0
                 return bb.build.exec_task(fn, taskname, the_data, cfg.profile)
             except:
@@ -308,6 +336,7 @@ class BitbakeWorker(object):
         self.cookercfg = None
         self.databuilder = None
         self.data = None
+        self.extraconfigdata = None
         self.build_pids = {}
         self.build_pipes = {}
     
@@ -342,6 +371,7 @@ class BitbakeWorker(object):
                     pass
             if len(self.queue):
                 self.handle_item(b"cookerconfig", self.handle_cookercfg)
+                self.handle_item(b"extraconfigdata", self.handle_extraconfigdata)
                 self.handle_item(b"workerdata", self.handle_workerdata)
                 self.handle_item(b"runtask", self.handle_runtask)
                 self.handle_item(b"finishnow", self.handle_finishnow)
@@ -349,10 +379,11 @@ class BitbakeWorker(object):
                 self.handle_item(b"quit", self.handle_quit)
 
             for pipe in self.build_pipes:
-                self.build_pipes[pipe].read()
+                if self.build_pipes[pipe].input in ready:
+                    self.build_pipes[pipe].read()
             if len(self.build_pids):
-                self.process_waitpid()
-            worker_flush()
+                while self.process_waitpid():
+                    continue
 
 
     def handle_item(self, item, func):
@@ -369,6 +400,9 @@ class BitbakeWorker(object):
         self.databuilder.parseBaseConfiguration()
         self.data = self.databuilder.data
 
+    def handle_extraconfigdata(self, data):
+        self.extraconfigdata = pickle.loads(data)
+
     def handle_workerdata(self, data):
         self.workerdata = pickle.loads(data)
         bb.msg.loggerDefaultDebugLevel = self.workerdata["logdefaultdebug"]
@@ -391,10 +425,10 @@ class BitbakeWorker(object):
         sys.exit(0)
 
     def handle_runtask(self, data):
-        fn, task, taskname, quieterrors, appends, taskdepdata = pickle.loads(data)
+        fn, task, taskname, quieterrors, appends, taskdepdata, dry_run_exec = pickle.loads(data)
         workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname))
 
-        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, appends, taskdepdata, quieterrors)
+        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, appends, taskdepdata, self.extraconfigdata, quieterrors, dry_run_exec)
 
         self.build_pids[pid] = task
         self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout)
@@ -407,9 +441,9 @@ class BitbakeWorker(object):
         try:
             pid, status = os.waitpid(-1, os.WNOHANG)
             if pid == 0 or os.WIFSTOPPED(status):
-                return None
+                return False
         except OSError:
-            return None
+            return False
 
         workerlog_write("Exit code of %s for pid %s\n" % (status, pid))
 
@@ -428,6 +462,8 @@ class BitbakeWorker(object):
 
         worker_fire_prepickled(b"<exitcode>" + pickle.dumps((task, status)) + b"</exitcode>")
 
+        return True
+
     def handle_finishnow(self, _):
         if self.build_pids:
             logger.info("Sending SIGTERM to remaining %s tasks", len(self.build_pids))
@@ -457,8 +493,9 @@ except BaseException as e:
         import traceback
         sys.stderr.write(traceback.format_exc())
         sys.stderr.write(str(e))
-while len(worker_queue):
-    worker_flush()
+
+worker_thread_exit = True
+worker_thread.join()
+
 workerlog_write("exitting")
 sys.exit(0)
-
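
The comment in the hunk gives the rationale: the cooker may be writing commands while the worker writes events back, so writes now go through a dedicated thread draining a queue.Queue, with select() guarding the pipe and partial writes handled explicitly. A stripped-down sketch of the same pattern (the reading side of the pipe is omitted):

    import os
    import queue
    import select
    import threading

    def writer(q, fd, stop):
        pending = b""
        while True:
            try:
                pending += q.get(True, 1)   # block briefly for new data
            except queue.Empty:
                pass
            while pending:
                # Wait until the pipe is writable, tolerate partial writes
                _, ready, _ = select.select([], [fd], [], 1)
                if ready:
                    written = os.write(fd, pending)
                    pending = pending[written:]
            if stop.is_set() and q.empty() and not pending:
                return

    q, stop = queue.Queue(), threading.Event()
    rfd, wfd = os.pipe()
    threading.Thread(target=writer, args=(q, wfd, stop)).start()
    q.put(b"<event>payload</event>")
    stop.set()   # the writer exits once the queue and buffer drain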

+ 165 - 0
bitbake/bin/git-make-shallow

@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+"""git-make-shallow: make the current git repository shallow
+
+Remove the history of the specified revisions, then optionally filter the
+available refs to those specified.
+"""
+
+import argparse
+import collections
+import errno
+import itertools
+import os
+import subprocess
+import sys
+
+version = 1.0
+
+
+def main():
+    if sys.version_info < (3, 4, 0):
+        sys.exit('Python 3.4 or greater is required')
+
+    git_dir = check_output(['git', 'rev-parse', '--git-dir']).rstrip()
+    shallow_file = os.path.join(git_dir, 'shallow')
+    if os.path.exists(shallow_file):
+        try:
+            check_output(['git', 'fetch', '--unshallow'])
+        except subprocess.CalledProcessError:
+            try:
+                os.unlink(shallow_file)
+            except OSError as exc:
+                if exc.errno != errno.ENOENT:
+                    raise
+
+    args = process_args()
+    revs = check_output(['git', 'rev-list'] + args.revisions).splitlines()
+
+    make_shallow(shallow_file, args.revisions, args.refs)
+
+    ref_revs = check_output(['git', 'rev-list'] + args.refs).splitlines()
+    remaining_history = set(revs) & set(ref_revs)
+    for rev in remaining_history:
+        if check_output(['git', 'rev-parse', '{}^@'.format(rev)]):
+            sys.exit('Error: %s was not made shallow' % rev)
+
+    filter_refs(args.refs)
+
+    if args.shrink:
+        shrink_repo(git_dir)
+        subprocess.check_call(['git', 'fsck', '--unreachable'])
+
+
+def process_args():
+    # TODO: add argument to automatically keep local-only refs, since they
+    # can't be easily restored with a git fetch.
+    parser = argparse.ArgumentParser(description='Remove the history of the specified revisions, then optionally filter the available refs to those specified.')
+    parser.add_argument('--ref', '-r', metavar='REF', action='append', dest='refs', help='remove all but the specified refs (cumulative)')
+    parser.add_argument('--shrink', '-s', action='store_true', help='shrink the git repository by repacking and pruning')
+    parser.add_argument('revisions', metavar='REVISION', nargs='+', help='a git revision/commit')
+    if len(sys.argv) < 2:
+        parser.print_help()
+        sys.exit(2)
+
+    args = parser.parse_args()
+
+    if args.refs:
+        args.refs = check_output(['git', 'rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
+    else:
+        args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')
+
+    args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
+    args.revisions = check_output(['git', 'rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
+    return args
+
+
+def check_output(cmd, input=None):
+    return subprocess.check_output(cmd, universal_newlines=True, input=input)
+
+
+def make_shallow(shallow_file, revisions, refs):
+    """Remove the history of the specified revisions."""
+    for rev in follow_history_intersections(revisions, refs):
+        print("Processing %s" % rev)
+        with open(shallow_file, 'a') as f:
+            f.write(rev + '\n')
+
+
+def get_all_refs(ref_filter=None):
+    """Return all the existing refs in this repository, optionally filtering the refs."""
+    ref_output = check_output(['git', 'for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
+    ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()]
+    if ref_filter:
+        ref_split = (e for e in ref_split if ref_filter(*e))
+    refs = [r[0] for r in ref_split]
+    return refs
+
+
+def iter_extend(iterable, length, obj=None):
+    """Ensure that iterable is the specified length by extending with obj."""
+    return itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length)
+
+
+def filter_refs(refs):
+    """Remove all but the specified refs from the git repository."""
+    all_refs = get_all_refs()
+    to_remove = set(all_refs) - set(refs)
+    if to_remove:
+        check_output(['xargs', '-0', '-n', '1', 'git', 'update-ref', '-d', '--no-deref'],
+                     input=''.join(l + '\0' for l in to_remove))
+
+
+def follow_history_intersections(revisions, refs):
+    """Determine all the points where the history of the specified revisions intersects the specified refs."""
+    queue = collections.deque(revisions)
+    seen = set()
+
+    for rev in iter_except(queue.popleft, IndexError):
+        if rev in seen:
+            continue
+
+        parents = check_output(['git', 'rev-parse', '%s^@' % rev]).splitlines()
+
+        yield rev
+        seen.add(rev)
+
+        if not parents:
+            continue
+
+        check_refs = check_output(['git', 'merge-base', '--independent'] + sorted(refs)).splitlines()
+        for parent in parents:
+            for ref in check_refs:
+                print("Checking %s vs %s" % (parent, ref))
+                try:
+                    merge_base = check_output(['git', 'merge-base', parent, ref]).rstrip()
+                except subprocess.CalledProcessError:
+                    continue
+                else:
+                    queue.append(merge_base)
+
+
+def iter_except(func, exception, start=None):
+    """Yield a function repeatedly until it raises an exception."""
+    try:
+        if start is not None:
+            yield start()
+        while True:
+            yield func()
+    except exception:
+        pass
+
+
+def shrink_repo(git_dir):
+    """Shrink the newly shallow repository, removing the unreachable objects."""
+    subprocess.check_call(['git', 'reflog', 'expire', '--expire-unreachable=now', '--all'])
+    subprocess.check_call(['git', 'repack', '-ad'])
+    try:
+        os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates'))
+    except OSError as exc:
+        if exc.errno != errno.ENOENT:
+            raise
+    subprocess.check_call(['git', 'prune', '--expire', 'now'])
+
+
+if __name__ == '__main__':
+    main()
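
The script's central trick is git's shallow file: a commit listed in $GIT_DIR/shallow is treated as parentless, and a subsequent repack drops the now-unreachable history. A minimal sketch of that core operation (run inside a repository; the revision is a placeholder):

    import os
    import subprocess

    def make_rev_shallow(rev='HEAD~100'):   # placeholder revision
        git_dir = subprocess.check_output(['git', 'rev-parse', '--git-dir'],
                                          universal_newlines=True).rstrip()
        sha = subprocess.check_output(['git', 'rev-parse', '%s^{}' % rev],
                                      universal_newlines=True).rstrip()
        # Listing a commit in $GIT_DIR/shallow cuts off its parents
        with open(os.path.join(git_dir, 'shallow'), 'a') as f:
            f.write(sha + '\n')
        # Expire reflogs and repack so the orphaned history is pruned
        subprocess.check_call(['git', 'reflog', 'expire',
                               '--expire-unreachable=now', '--all'])
        subprocess.check_call(['git', 'repack', '-ad'])
        subprocess.check_call(['git', 'prune', '--expire', 'now'])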

+ 79 - 32
bitbake/bin/toaster

@@ -18,15 +18,52 @@
 # along with this program. If not, see http://www.gnu.org/licenses/.
 
 HELP="
-Usage: source toaster start|stop [webport=<address:port>] [noweb]
+Usage: source toaster start|stop [webport=<address:port>] [noweb] [nobuild]
     Optional arguments:
-        [noweb] Setup the environment for building with toaster but don't start the development server
+        [nobuild] Setup the environment for capturing builds with toaster but disable managed builds
+        [noweb] Setup the environment for capturing builds with toaster but don't start the web server
         [webport] Set the development server (default: localhost:8000)
 "
 
+custom_extention()
+{
+    custom_extension=$BBBASEDIR/lib/toaster/orm/fixtures/custom_toaster_append.sh
+    if [ -f $custom_extension ] ; then
+        $custom_extension $*
+    fi
+}
+
+databaseCheck()
+{
+    retval=0
+    # you can always add a superuser later via
+    # ../bitbake/lib/toaster/manage.py createsuperuser --username=<ME>
+    $MANAGE migrate --noinput || retval=1
+
+    if [ $retval -eq 1 ]; then
+        echo "Failed migrations, aborting system start" 1>&2
+        return $retval
+    fi
+    # Make sure that checksettings can pick up any value for TEMPLATECONF
+    export TEMPLATECONF
+    $MANAGE checksettings --traceback || retval=1
+
+    if [ $retval -eq 1 ]; then
+        printf "\nError while checking settings; aborting\n"
+        return $retval
+    fi
+
+    return $retval
+}
+
 webserverKillAll()
 {
     local pidfile
+    if [ -f ${BUILDDIR}/.toastermain.pid ] ; then
+        custom_extention web_stop_postpend
+    else
+        custom_extention noweb_stop_postpend
+    fi
     for pidfile in ${BUILDDIR}/.toastermain.pid ${BUILDDIR}/.runbuilds.pid; do
         if [ -f ${pidfile} ]; then
             pid=`cat ${pidfile}`
@@ -48,22 +85,9 @@ webserverStartAll()
     fi
 
     retval=0
-    # you can always add a superuser later via
-    # ../bitbake/lib/toaster/manage.py createsuperuser --username=<ME>
-    $MANAGE migrate --noinput || retval=1
 
-    if [ $retval -eq 1 ]; then
-        echo "Failed migrations, aborting system start" 1>&2
-        return $retval
-    fi
-    # Make sure that checksettings can pick up any value for TEMPLATECONF
-    export TEMPLATECONF
-    $MANAGE checksettings --traceback || retval=1
-
-    if [ $retval -eq 1 ]; then
-        printf "\nError while checking settings; aborting\n"
-        return $retval
-    fi
+    # check the database
+    databaseCheck || return 1
 
     echo "Starting webserver..."
 
@@ -79,6 +103,7 @@ webserverStartAll()
     else
         echo "Toaster development webserver started at http://$ADDR_PORT"
         echo -e "\nYou can now run 'bitbake <target>' on the command line and monitor your build in Toaster.\nYou can also use a Toaster project to configure and run a build.\n"
+        custom_extention web_start_postpend $ADDR_PORT
     fi
 
     return $retval
@@ -106,12 +131,18 @@ verify_prereq() {
     # Verify Django version
     reqfile=$(python3 -c "import os; print(os.path.realpath('$BBBASEDIR/toaster-requirements.txt'))")
     exp='s/Django\([><=]\+\)\([^,]\+\),\([><=]\+\)\(.\+\)/'
-    exp=$exp'import sys,django;version=django.get_version().split(".");'
-    exp=$exp'sys.exit(not (version \1 "\2".split(".") and version \3 "\4".split(".")))/p'
+    # expand version parts to 2 digits to support 1.10.x > 1.8
+    # (note:helper functions hard to insert in-line)
+    exp=$exp'import sys,django;'
+    exp=$exp'version=["%02d" % int(n) for n in django.get_version().split(".")];'
+    exp=$exp'vmin=["%02d" % int(n) for n in "\2".split(".")];'
+    exp=$exp'vmax=["%02d" % int(n) for n in "\4".split(".")];'
+    exp=$exp'sys.exit(not (version \1 vmin and version \3 vmax))'
+    exp=$exp'/p'
     if ! sed -n "$exp" $reqfile | python3 - ; then
         req=`grep ^Django $reqfile`
         echo "This program needs $req"
-        echo "Please install with pip install -r $reqfile"
+        echo "Please install with pip3 install -r $reqfile"
         return 2
     fi
 
@@ -151,16 +182,9 @@ fi
 
 unset OE_ROOT
 
-# this defines the dir toaster will use for
-# 1) clones of layers (in _toaster_clones )
-# 2) the build dir (in build)
-# 3) the sqlite db if that is being used.
-# 4) pid's we need to clean up on exit/shutdown
-# note: for future. in order to make this an arbitrary directory, we need to
-# make sure that the toaster.sqlite file doesn't default to `pwd` like it currently does.
-export TOASTER_DIR=`pwd`
 
 WEBSERVER=1
+export TOASTER_BUILDSERVER=1
 ADDR_PORT="localhost:8000"
 unset CMD
 for param in $*; do
@@ -168,6 +192,9 @@ for param in $*; do
     noweb )
             WEBSERVER=0
     ;;
+    nobuild )
+            TOASTER_BUILDSERVER=0
+    ;;
     start )
             CMD=$param
     ;;
@@ -214,10 +241,8 @@ fi
 # 2) the build dir (in build)
 # 3) the sqlite db if that is being used.
 # 4) pid's we need to clean up on exit/shutdown
-# note: for future. in order to make this an arbitrary directory, we need to
-# make sure that the toaster.sqlite file doesn't default to `pwd`
-# like it currently does.
 export TOASTER_DIR=`dirname $BUILDDIR`
+export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE TOASTER_DIR"
 
 # Determine the action. If specified by arguments, fine, if not, toggle it
 if [ "$CMD" = "start" ] ; then
@@ -234,6 +259,7 @@ fi
 echo "The system will $CMD."
 
 # Execute the commands
+custom_extention toaster_prepend $CMD $ADDR_PORT
 
 case $CMD in
     start )
@@ -249,15 +275,34 @@ case $CMD in
         line='INHERIT+="toaster buildhistory"'
         grep -q "$line" $conf || echo $line >> $conf
 
+        if [ $WEBSERVER -eq 0 ] ; then
+            # Do not update the database for "noweb" unless
+            # it does not yet exist
+            if [ ! -f "$TOASTER_DIR/toaster.sqlite" ] ; then
+                if ! databaseCheck; then
+                    echo "Failed ${CMD}."
+                    return 4
+                fi
+            fi
+            custom_extention noweb_start_postpend $ADDR_PORT
+        fi
         if [ $WEBSERVER -gt 0 ] && ! webserverStartAll; then
             echo "Failed ${CMD}."
             return 4
         fi
         export BITBAKE_UI='toasterui'
-        $MANAGE runbuilds & echo $! >${BUILDDIR}/.runbuilds.pid
+        if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
+            $MANAGE runbuilds \
+               </dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
+               & echo $! >${BUILDDIR}/.runbuilds.pid
+        else
+            echo "Toaster build server not started."
+        fi
+
         # set fail safe stop system on terminal exit
         trap stop_system SIGHUP
         echo "Successful ${CMD}."
+        custom_extention toaster_postpend $CMD $ADDR_PORT
         return 0
     ;;
     stop )
@@ -265,3 +310,5 @@ case $CMD in
         echo "Successful ${CMD}."
     ;;
 esac
+custom_extention toaster_postpend $CMD $ADDR_PORT
+
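
The interesting part of the new Django version check is zero-padding each version component so that string comparison agrees with numeric ordering, which is what the "1.10.x > 1.8" comment refers to. The same idea in plain Python:

    def pad(version):
        # Zero-pad each component so lexicographic comparison of the
        # lists agrees with numeric ordering ("1.10" sorts after "1.8")
        return ["%02d" % int(n) for n in version.split(".")]

    assert pad("1.10") > pad("1.8")                      # padded: correct
    assert not ("1.10".split(".") > "1.8".split("."))    # unpadded: wrong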

+ 0 - 1
bitbake/conf/bitbake.conf

@@ -24,7 +24,6 @@ DEPENDS = ""
 DEPLOY_DIR = "${TMPDIR}/deploy"
 DEPLOY_DIR_IMAGE = "${DEPLOY_DIR}/images"
 DL_DIR = "${TMPDIR}/downloads"
-FILESDIR = "${@bb.utils.which(d.getVar('FILESPATH', True), '.')}"
 FILESPATH = "${FILE_DIRNAME}/${PF}:${FILE_DIRNAME}/${P}:${FILE_DIRNAME}/${PN}:${FILE_DIRNAME}/files:${FILE_DIRNAME}"
 FILE_DIRNAME = "${@os.path.dirname(d.getVar('FILE', False))}"
 GITDIR = "${DL_DIR}/git"
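
FILESDIR collapsed FILESPATH to a single directory at parse time; the fetcher now searches FILESPATH directly. The removed expression used bb.utils.which(), which walks a colon-separated path list. A sketch of the equivalent lookup (the datastore d and the file name are assumptions):

    import bb.utils

    # Search each FILESPATH directory in turn for a local file,
    # the way the local fetcher does now that FILESDIR is gone.
    filespath = d.getVar('FILESPATH')                # 'd' from a recipe context
    found = bb.utils.which(filespath, 'defconfig')   # example file name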

+ 1 - 1
bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.xml

@@ -916,7 +916,7 @@
         <para>
             Finally, after all the setscene tasks have executed, BitBake calls the
             function listed in
-            <link linkend='var-BB_SETSCENE_VERIFY_FUNCTION'><filename>BB_SETSCENE_VERIFY_FUNCTION</filename></link>
+            <link linkend='var-BB_SETSCENE_VERIFY_FUNCTION2'><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename></link>
             with the list of tasks BitBake thinks has been "covered".
             The metadata can then ensure that this list is correct and can
             inform BitBake that it wants specific tasks to be run regardless

+ 23 - 24
bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.xml

@@ -38,7 +38,7 @@
             The code to execute the first part of this process, a fetch,
             looks something like the following:
             <literallayout class='monospaced'>
-     src_uri = (d.getVar('SRC_URI', True) or "").split()
+     src_uri = (d.getVar('SRC_URI') or "").split()
      fetcher = bb.fetch2.Fetch(src_uri, d)
      fetcher.download()
             </literallayout>
@@ -52,7 +52,7 @@
         <para>
             The instantiation of the fetch class is usually followed by:
             <literallayout class='monospaced'>
-     rootdir = l.getVar('WORKDIR', True)
+     rootdir = l.getVar('WORKDIR')
      fetcher.unpack(rootdir)
             </literallayout>
             This code unpacks the downloaded files to the
@@ -268,15 +268,6 @@
                 <link linkend='var-FILESPATH'><filename>FILESPATH</filename></link>
                 variable is used in the same way
                 <filename>PATH</filename> is used to find executables.
-                Failing that,
-                <link linkend='var-FILESDIR'><filename>FILESDIR</filename></link>
-                is used to find the appropriate relative file.
-                <note>
-                    <filename>FILESDIR</filename> is deprecated and can
-                    be replaced with <filename>FILESPATH</filename>.
-                    Because <filename>FILESDIR</filename> is likely to be
-                    removed, you should not use this variable in any new code.
-                </note>
                 If the file cannot be found, it is assumed that it is available in
                 <link linkend='var-DL_DIR'><filename>DL_DIR</filename></link>
                 by the time the <filename>download()</filename> method is called.
@@ -385,7 +376,8 @@
                 The supported parameters are as follows:
                 <itemizedlist>
                     <listitem><para><emphasis>"method":</emphasis>
-                        The protocol over which to communicate with the CVS server.
+                        The protocol over which to communicate with the CVS
+                        server.
                         By default, this protocol is "pserver".
                         If "method" is set to "ext", BitBake examines the
                         "rsh" parameter and sets <filename>CVS_RSH</filename>.
@@ -469,25 +461,29 @@
                         You can think of this parameter as the top-level
                         directory of the repository data you want.
                         </para></listitem>
+                    <listitem><para><emphasis>"path_spec":</emphasis>
+                        A specific directory in which to checkout the
+                        specified svn module.
+                        </para></listitem>
                     <listitem><para><emphasis>"protocol":</emphasis>
                         The protocol to use, which defaults to "svn".
-                        Other options are "svn+ssh" and "rsh".
-                        For "rsh", the "rsh" parameter is also used.
+                        If "protocol" is set to "svn+ssh", the "ssh"
+                        parameter is also used.
                         </para></listitem>
                     <listitem><para><emphasis>"rev":</emphasis>
                         The revision of the source code to checkout.
                         </para></listitem>
-                    <listitem><para><emphasis>"date":</emphasis>
-                        The date of the source code to checkout.
-                        Specific revisions are generally much safer to checkout
-                        rather than by date as they do not involve timezones
-                        (e.g. they are much more deterministic).
-                        </para></listitem>
                     <listitem><para><emphasis>"scmdata":</emphasis>
                         Causes the “.svn” directories to be available during
                         compile-time when set to "keep".
                         By default, these directories are removed.
                         </para></listitem>
+                    <listitem><para><emphasis>"ssh":</emphasis>
+                        An optional parameter used when "protocol" is set
+                        to "svn+ssh".
+                        You can use this parameter to specify the ssh
+                        program used by svn.
+                        </para></listitem>
                     <listitem><para><emphasis>"transportuser":</emphasis>
                         When required, sets the username for the transport.
                         By default, this parameter is empty.
@@ -496,10 +492,11 @@
                         command.
                         </para></listitem>
                 </itemizedlist>
-                Following are two examples using svn:
+                Following are three examples using svn:
                 <literallayout class='monospaced'>
-     SRC_URI = "svn://svn.oe.handhelds.org/svn;module=vip;proto=http;rev=667"
-     SRC_URI = "svn://svn.oe.handhelds.org/svn/;module=opie;proto=svn+ssh;date=20060126"
+     SRC_URI = "svn://myrepos/proj1;module=vip;protocol=http;rev=667"
+     SRC_URI = "svn://myrepos/proj1;module=opie;protocol=svn+ssh"
+     SRC_URI = "svn://myrepos/proj1;module=trunk;protocol=http;path_spec=${MY_DIR}/proj1"
                 </literallayout>
             </para>
         </section>
@@ -623,7 +620,9 @@
                         The Git Submodules fetcher is not a complete fetcher
                         implementation.
                         The fetcher has known issues where it does not use the
-                        normal source mirroring infrastructure properly.
+                        normal source mirroring infrastructure properly. Further,
+                        the submodule sources it fetches are not visible to the
+                        licensing and source archiving infrastructures.
                     </para>
                 </note>
             </para>
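
Putting the manual's two fragments together, a complete fetch-and-unpack sequence looks roughly like this (d is a datastore from an active recipe context; note that getVar() no longer needs an explicit expansion argument):

    import bb.fetch2

    src_uri = (d.getVar('SRC_URI') or "").split()
    fetcher = bb.fetch2.Fetch(src_uri, d)
    fetcher.download()                     # fetch into DL_DIR
    fetcher.unpack(d.getVar('WORKDIR'))    # then unpack into WORKDIR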

+ 40 - 32
bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.xml

@@ -128,15 +128,8 @@
         </para>
 
         <note>
-            This example was inspired by and drew heavily from these sources:
-            <itemizedlist>
-                <listitem><para>
-                    <ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>
-                    </para></listitem>
-                <listitem><para>
-                    <ulink url="http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/">Hambedded Linux blog post - From Bitbake Hello World to an Image</ulink>
-                    </para></listitem>
-            </itemizedlist>
+            This example was inspired by and drew heavily from
+            <ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>.
         </note>
 
         <para>
@@ -267,9 +260,9 @@
                 files.
                 For this example, you need to create the file in your project directory
                 and define some key BitBake variables.
-                For more information on the <filename>bitbake.conf</filename>,
+                For more information on the <filename>bitbake.conf</filename> file,
                 see
-                <ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#an-overview-of-bitbakeconf'></ulink>
+                <ulink url='http://git.openembedded.org/bitbake/tree/conf/bitbake.conf'></ulink>.
                 </para>
                 <para>Use the following commands to create the <filename>conf</filename>
                 directory in the project directory:
@@ -280,14 +273,32 @@
                 some editor to create the <filename>bitbake.conf</filename>
                 so that it contains the following:
                 <literallayout class='monospaced'>
+     <link linkend='var-PN'>PN</link>  = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
+                </literallayout>
+                <literallayout class='monospaced'>
      TMPDIR  = "${<link linkend='var-TOPDIR'>TOPDIR</link>}/tmp"
      <link linkend='var-CACHE'>CACHE</link>   = "${TMPDIR}/cache"
-     <link linkend='var-STAMP'>STAMP</link>   = "${TMPDIR}/stamps"
-     <link linkend='var-T'>T</link>       = "${TMPDIR}/work"
-     <link linkend='var-B'>B</link>       = "${TMPDIR}"
+     <link linkend='var-STAMP'>STAMP</link>   = "${TMPDIR}/${PN}/stamps"
+     <link linkend='var-T'>T</link>       = "${TMPDIR}/${PN}/work"
+     <link linkend='var-B'>B</link>       = "${TMPDIR}/${PN}"
                 </literallayout>
+                <note>
+                    Without a value for <filename>PN</filename>, the
+                    variables <filename>STAMP</filename>,
+                    <filename>T</filename>, and <filename>B</filename>
+                    prevent more than one recipe from working. You can fix
+                    this either by setting <filename>PN</filename> to have
+                    a value similar to what OpenEmbedded and BitBake use
+                    in the default <filename>bitbake.conf</filename> file
+                    (see the previous example), or by manually updating each
+                    recipe to set <filename>PN</filename>. You will also
+                    need to include <filename>PN</filename> as part of the
+                    <filename>STAMP</filename>, <filename>T</filename>, and
+                    <filename>B</filename> variable definitions in the
+                    <filename>local.conf</filename> file.
+                </note>
                 The <filename>TMPDIR</filename> variable establishes a directory
-                that BitBake uses for build output and intermediate files (other
+                that BitBake uses for build output and intermediate files other
                 than the cached information used by the
                 <link linkend='setscene'>Setscene</link> process.
                 Here, the <filename>TMPDIR</filename> directory is set to
@@ -307,19 +318,19 @@
                 file exists, you can run the <filename>bitbake</filename>
                 command again:
                 <literallayout class='monospaced'>
-$ bitbake
-ERROR: Traceback (most recent call last):
-  File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
-    return func(fn, *args)
-  File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
-    bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
-  File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
-    include(fn, file, lineno, d, "inherit")
-  File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
-    raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
-ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
+     $ bitbake
+     ERROR: Traceback (most recent call last):
+       File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
+         return func(fn, *args)
+       File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
+         bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
+       File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
+         include(fn, file, lineno, d, "inherit")
+       File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
+         raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
+     ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
 
-ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
+     ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
                 </literallayout>
                 In the sample output, BitBake could not find the
                 <filename>classes/base.bbclass</filename> file.
@@ -352,9 +363,6 @@ ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inh
                 Of course, the <filename>base.bbclass</filename> can have much
                 more depending on which build environments BitBake is
                 supporting.
-                For more information on the <filename>base.bbclass</filename> file,
-                you can look at
-                <ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#tasks'></ulink>.
                 </para></listitem>
             <listitem><para><emphasis>Run BitBake:</emphasis>
                 After making sure that the <filename>classes/base.bbclass</filename>
@@ -375,8 +383,8 @@ ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inh
                 code separate from the general metadata used by BitBake.
                 Thus, this example creates and uses a layer called "mylayer".
                 <note>
-                    You can find additional information on adding a layer at
-                    <ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#adding-an-example-layer'></ulink>.
+                    You can find additional information on layers at
+                    <ulink url='http://www.yoctoproject.org/docs/2.3/bitbake-user-manual/bitbake-user-manual.html#layers'></ulink>.
                 </note>
                 </para>
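                <para>
                    As a hedged sketch, the layer configuration file for
                    "mylayer" (conventionally
                    <filename>conf/layer.conf</filename>) can be as small as
                    the following; the paths follow common conventions and
                    are illustrative:
                    <literallayout class='monospaced'>
     BBPATH .= ":${LAYERDIR}"
     BBFILES += "${LAYERDIR}/*.bb"
                    </literallayout>
                </para>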
                 <para>Minimally, you need a recipe file and a layer configuration

+ 36 - 22
bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.xml

@@ -440,7 +440,7 @@
                     Build Checkout:</emphasis>
                     A final possibility for getting a copy of BitBake is that it
                     already comes with your checkout of a larger BitBake-based build
-                    system, such as Poky or Yocto Project.
+                    system, such as Poky.
                     Rather than manually checking out individual layers and
                     gluing them together yourself, you can check
                     out an entire build system.
@@ -488,8 +488,6 @@
                              target that failed and anything depending on it cannot
                              be built, as much as possible will be built before
                              stopping.
-       -a, --tryaltconfigs   Continue with builds by trying to use alternative
-                             providers where possible.
        -f, --force           Force the specified targets/task to run (invalidating
                              any existing stamp file).
        -c CMD, --cmd=CMD     Specify the task to execute. The exact options
@@ -504,9 +502,20 @@
                              Read the specified file before bitbake.conf.
        -R POSTFILE, --postread=POSTFILE
                              Read the specified file after bitbake.conf.
-       -v, --verbose         Output more log message data to the terminal.
+       -v, --verbose         Enable tracing of shell tasks (with 'set -x'). Also
+                             print bb.note(...) messages to stdout (in addition to
+                             writing them to ${T}/log.do_&lt;task&gt;).
        -D, --debug           Increase the debug level. You can specify this more
-                             than once.
+                             than once. -D sets the debug level to 1, where only
+                             bb.debug(1, ...) messages are printed to stdout; -DD
+                             sets the debug level to 2, where both bb.debug(1, ...)
+                             and bb.debug(2, ...) messages are printed; etc.
+                             Without -D, no debug messages are printed. Note that
+                             -D only affects output to stdout. All debug messages
+                             are written to ${T}/log.do_taskname, regardless of the
+                             debug level.
+       -q, --quiet           Output less log message data to the terminal. You can
+                             specify this more than once.
        -n, --dry-run         Don't execute, just go through the motions.
        -S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER
                              Dump out the signature construction information, with
@@ -529,29 +538,34 @@
        -l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
                              Show debug logging for the specified logging domains
        -P, --profile         Profile the command and save reports.
-       -u UI, --ui=UI        The user interface to use (depexp, goggle, hob, knotty
-                             or ncurses - default knotty).
-       -t SERVERTYPE, --servertype=SERVERTYPE
-                             Choose which server type to use (process or xmlrpc -
-                             default process).
+       -u UI, --ui=UI        The user interface to use (knotty, ncurses or taskexp
+                             - default knotty).
        --token=XMLRPCTOKEN   Specify the connection token to be used when
                              connecting to a remote server.
        --revisions-changed   Set the exit code depending on whether upstream
                              floating revisions have changed or not.
        --server-only         Run bitbake without a UI, only starting a server
                              (cooker) process.
-       -B BIND, --bind=BIND  The name/address for the bitbake server to bind to.
+       -B BIND, --bind=BIND  The name/address for the bitbake xmlrpc server to bind
+                             to.
+       -T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT
+                             Set timeout to unload bitbake server due to
+                             inactivity, set to -1 means no unload, default:
+                             Environment variable BB_SERVER_TIMEOUT.
        --no-setscene         Do not run any setscene tasks. sstate will be ignored
                              and everything needed, built.
+       --setscene-only       Only run setscene tasks, don't run any real tasks.
        --remote-server=REMOTE_SERVER
                              Connect to the specified server.
-       -m, --kill-server     Terminate the remote server.
+       -m, --kill-server     Terminate any running bitbake server.
        --observe-only        Connect to a server as an observing-only client.
        --status-only         Check the status of the remote bitbake server.
        -w WRITEEVENTLOG, --write-log=WRITEEVENTLOG
                              Writes the event log of the build to a bitbake event
                              json file. Use '' (empty string) to assign the name
                              automatically.
+       --runall=RUNALL       Run the specified task for all build targets and their
+                             dependencies.
                 </literallayout>
             </para>
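            <para>
                As a hedged illustration of combining the
                <filename>-D</filename> and <filename>-v</filename> options
                described above (the target name is hypothetical):
                <literallayout class='monospaced'>
     $ bitbake -DD -v mytarget
                </literallayout>
                This prints both <filename>bb.debug(1, ...)</filename> and
                <filename>bb.debug(2, ...)</filename> messages to stdout and
                enables "set -x" tracing in shell tasks.
            </para>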
         </section>
@@ -665,21 +679,21 @@
                 </para>
 
                 <para>
-                    When you generate a dependency graph, BitBake writes four files
+                    When you generate a dependency graph, BitBake writes three files
                     to the current working directory (a usage sketch follows
                     the list):
                     <itemizedlist>
-                        <listitem><para><emphasis><filename>package-depends.dot</filename>:</emphasis>
-                            Shows BitBake's knowledge of dependencies between
-                            runtime targets.
+                        <listitem><para>
+                            <emphasis><filename>recipe-depends.dot</filename>:</emphasis>
+                            Shows dependencies between recipes (i.e. a collapsed version of
+                            <filename>task-depends.dot</filename>).
                             </para></listitem>
-                        <listitem><para><emphasis><filename>pn-depends.dot</filename>:</emphasis>
-                            Shows dependencies between build-time targets
-                            (i.e. recipes).
-                            </para></listitem>
-                        <listitem><para><emphasis><filename>task-depends.dot</filename>:</emphasis>
+                        <listitem><para>
+                            <emphasis><filename>task-depends.dot</filename>:</emphasis>
                             Shows dependencies between tasks.
+                            These dependencies match BitBake's internal task execution list.
                             </para></listitem>
-                        <listitem><para><emphasis><filename>pn-buildlist</filename>:</emphasis>
+                        <listitem><para>
+                            <emphasis><filename>pn-buildlist</filename>:</emphasis>
                             Shows a simple list of targets that are to be built.
                             </para></listitem>
                     </itemizedlist>
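                    A hedged usage sketch of generating these files (the
                    recipe name is hypothetical):
                    <literallayout class='monospaced'>
     $ bitbake -g myrecipe
                    </literallayout>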

+ 359 - 126
bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.xml

@@ -61,6 +61,48 @@
             </para>
         </section>
 
+        <section id='line-joining'>
+            <title>Line Joining</title>
+
+            <para>
+                Outside of
+                <link linkend='functions'>functions</link>, BitBake joins
+                any line ending in a backslash character ("\")
+                with the following line before parsing statements.
+                The most common use for the "\" character is to split variable
+                assignments over multiple lines, as in the following example:
+                <literallayout class='monospaced'>
+     FOO = "bar \
+            baz \
+            qaz"
+                </literallayout>
+                Both the "\" character and the newline character
+                that follow it are removed when joining lines.
+                Thus, no newline characters end up in the value of
+                <filename>FOO</filename>.
+            </para>
+
+            <para>
+                Consider this additional example where the two
+                assignments both assign "barbaz" to
+                <filename>FOO</filename>:
+                <literallayout class='monospaced'>
+     FOO = "barbaz"
+
+     FOO = "bar\
+     baz"
+                </literallayout>
+                <note>
+                    BitBake does not interpret escape sequences like
+                    "\n" in variable values.
+                    For these to have an effect, the value must be passed
+                    to some utility that interprets escape sequences,
+                    such as <filename>printf</filename> or
+                    <filename>echo -n</filename>.
+                </note>
+            </para>
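+            <para>
+                To illustrate the note, here is a hedged sketch in which the
+                "\n" stays literal in the variable's value and only
+                <filename>printf</filename> interprets it; the variable and
+                function names are illustrative:
+                <literallayout class='monospaced'>
+     FOO = "hello\nworld"
+
+     do_show() {
+         printf "${FOO}\n"
+     }
+                </literallayout>
+            </para>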
+        </section>
+
         <section id='variable-expansion'>
             <title>Variable Expansion</title>
 
@@ -463,14 +505,14 @@
             <title>Unsetting variables</title>
 
             <para>
-                It is possible to completely remove a variable or a variable flag 
+                It is possible to completely remove a variable or a variable flag
                 from BitBake's internal data dictionary by using the "unset" keyword.
                 Here is an example:
                 <literallayout class='monospaced'>
         unset DATE
         unset do_fetch[noexec]
                 </literallayout>
-                These two statements remove the <filename>DATE</filename> and the 
+                These two statements remove the <filename>DATE</filename> variable and the
                 <filename>do_fetch[noexec]</filename> flag.
             </para>
 
@@ -627,7 +669,7 @@
                         <literallayout class='monospaced'>
      DEPENDS = "glibc ncurses"
      OVERRIDES = "machine:local"
-     DEPENDS_append_machine = "libmad"
+     DEPENDS_append_machine = " libmad"
                         </literallayout>
                         In this example, <filename>DEPENDS</filename> becomes
                         "glibc ncurses libmad".
@@ -857,11 +899,12 @@
 
             <para>
                 The <filename>inherit</filename> directive is a rudimentary
-                means of specifying what classes of functionality your
-                recipes require.
+                means of specifying functionality contained in class files
+                that your recipes require.
                 For example, you can easily abstract out the tasks involved in
                 building a package that uses Autoconf and Automake and put
-                those tasks into a class file that can be used by your recipe.
+                those tasks into a class file and then have your recipe
+                inherit that class file.
             </para>
 
             <para>
@@ -880,13 +923,24 @@
                     inherited class within your recipe by doing so
                     after the "inherit" statement.
                 </note>
+                If you want to use the directive to inherit
+                multiple classes, separate them with spaces.
+                The following example shows how to inherit both the
+                <filename>buildhistory</filename> and <filename>rm_work</filename>
+                classes:
+                <literallayout class='monospaced'>
+     inherit buildhistory rm_work
+                </literallayout>
             </para>
 
             <para>
-                If necessary, it is possible to inherit a class
-                conditionally by using
-                a variable expression after the <filename>inherit</filename>
-                statement.
+                An advantage with the inherit directive as compared to both
+                the
+                <link linkend='include-directive'>include</link> and
+                <link linkend='require-inclusion'>require</link> directives
+                is that you can inherit class files conditionally.
+                You can accomplish this by using a variable expression
+                after the <filename>inherit</filename> statement.
                 Here is an example:
                 <literallayout class='monospaced'>
      inherit ${VARNAME}
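                One hedged sketch of setting the variable beforehand; the
                class name and condition are hypothetical:
                <literallayout class='monospaced'>
     VARNAME = "${@'rm_work' if d.getVar('CLEANUP') == '1' else ''}"
                </literallayout>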
@@ -942,6 +996,17 @@
                 within <filename>BBPATH</filename>.
             </para>
 
+            <para>
+                The include directive is a more generic method of including
+                functionality as compared to the
+                <link linkend='inherit-directive'>inherit</link> directive,
+                which is restricted to class (i.e. <filename>.bbclass</filename>)
+                files.
+                The include directive is applicable for any other kind of
+                shared or encapsulated functionality or configuration that
+                does not suit a <filename>.bbclass</filename> file.
+            </para>
+
             <para>
                 As an example, suppose you needed a recipe to include some
                 self-test definitions:
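                A minimal sketch of such an include (the file name is
                illustrative):
                <literallayout class='monospaced'>
     include test_defs.inc
                </literallayout>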
@@ -975,6 +1040,18 @@
                 being parsed at the location of the directive.
             </para>
 
+            <para>
+                The require directive, like the include directive previously
+                described, is a more generic method of including
+                functionality as compared to the
+                <link linkend='inherit-directive'>inherit</link> directive,
+                which is restricted to class (i.e. <filename>.bbclass</filename>)
+                files.
+                The require directive is applicable for any other kind of
+                shared or encapsulated functionality or configuration that
+                does not suit a <filename>.bbclass</filename> file.
+            </para>
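+            <para>
+                As a hedged sketch, a recipe might require shared definitions
+                that must be present for parsing to succeed; the path is
+                illustrative:
+                <literallayout class='monospaced'>
+     require conf/common-defs.inc
+                </literallayout>
+            </para>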
+
             <para>
                 Similar to how BitBake handles
                 <link linkend='include-directive'><filename>include</filename></link>,
@@ -1007,8 +1084,9 @@
 
             <para>
                 When creating a configuration file (<filename>.conf</filename>),
-                you can use the <filename>INHERIT</filename> directive to
-                inherit a class.
+                you can use the
+                <link linkend='var-INHERIT'><filename>INHERIT</filename></link>
+                configuration directive to inherit a class.
                 BitBake only supports this directive when used within
                 a configuration file.
             </para>
@@ -1041,7 +1119,7 @@
                 <filename>autotools</filename> and <filename>pkgconfig</filename>
                 classes:
                 <literallayout class='monospaced'>
-     inherit autotools pkgconfig
+     INHERIT += "autotools pkgconfig"
                 </literallayout>
             </para>
         </section>
@@ -1165,7 +1243,7 @@
                 <literallayout class='monospaced'>
      python some_python_function () {
          d.setVar("TEXT", "Hello World")
-         print d.getVar("TEXT", True)
+         print d.getVar("TEXT")
      }
                 </literallayout>
                 Because the Python "bb" and "os" modules are already
@@ -1180,7 +1258,7 @@
                     to freely set variable values to expandable expressions
                     without having them expanded prematurely.
                     If you do wish to expand a variable within a Python
-                    function, use <filename>d.getVar("X", True)</filename>.
+                    function, use <filename>d.getVar("X")</filename>.
                     Or, for more complicated expressions, use
                     <filename>d.expand()</filename>.
                 </note>
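                For instance, here is a hedged sketch of the expansion
                behavior inside an anonymous Python function; the variable
                names are hypothetical:
                <literallayout class='monospaced'>
     python () {
         d.setVar("GREETING", "Hello, ${NAME}")  # stored unexpanded
         d.setVar("NAME", "World")
         bb.note(d.getVar("GREETING"))           # expands to "Hello, World"
         bb.note(d.getVar("GREETING", False))    # prints "Hello, ${NAME}"
     }
                </literallayout>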
@@ -1232,7 +1310,7 @@
                 Here is an example:
                 <literallayout class='monospaced'>
      def get_depends(d):
-         if d.getVar('SOMECONDITION', True):
+         if d.getVar('SOMECONDITION'):
              return "dependencywithcond"
          else:
              return "dependency"
@@ -1367,7 +1445,7 @@
                 based on the value of  another variable:
                 <literallayout class='monospaced'>
      python () {
-         if d.getVar('SOMEVAR', True) == 'value':
+         if d.getVar('SOMEVAR') == 'value':
              d.setVar('ANOTHERVAR', 'value2')
      }
                 </literallayout>
@@ -1531,37 +1609,29 @@
         <title>Tasks</title>
 
         <para>
-            Tasks are BitBake execution units that originate as
-            functions and make up the steps that BitBake needs to run
-            for given recipe.
-            Tasks are only supported in recipe (<filename>.bb</filename>
-            or <filename>.inc</filename>) and class
-            (<filename>.bbclass</filename>) files.
-            By convention, task names begin with the string "do_".
-        </para>
-
-        <para>
-            Here is an example of a task that prints out the date:
-            <literallayout class='monospaced'>
-     python do_printdate () {
-         import time
-         print time.strftime('%Y%m%d', time.gmtime())
-     }
-     addtask printdate after do_fetch before do_build
-            </literallayout>
+            Tasks are BitBake execution units that make up the
+            steps that BitBake can run for a given recipe.
+            Tasks are only supported in recipes and classes
+            (i.e. in <filename>.bb</filename> files and files
+            included or inherited from <filename>.bb</filename>
+            files).
+            By convention, tasks have names that start with "do_".
         </para>
 
         <section id='promoting-a-function-to-a-task'>
             <title>Promoting a Function to a Task</title>
 
             <para>
-                Any function can be promoted to a task by applying the
+                Tasks are either
+                <link linkend='shell-functions'>shell functions</link> or
+                <link linkend='bitbake-style-python-functions'>BitBake-style Python functions</link>
+                that have been promoted to tasks by using the
                 <filename>addtask</filename> command.
-                The <filename>addtask</filename> command also describes
-                inter-task dependencies.
-                Here is the function from the previous section but with the
-                <filename>addtask</filename> command promoting it to a task
-                and defining some dependencies:
+                The <filename>addtask</filename> command can also
+                optionally describe dependencies between the
+                task and other tasks.
+                Here is an example that shows how to define a task
+                and declare some dependencies:
                 <literallayout class='monospaced'>
      python do_printdate () {
          import time
@@ -1569,15 +1639,81 @@
      }
      addtask printdate after do_fetch before do_build
                 </literallayout>
-                In the example, the function is defined and then promoted
-                as a task.
-                The <filename>do_printdate</filename> task becomes a dependency of
-                the <filename>do_build</filename> task, which is the default
-                task.
-                And, the <filename>do_printdate</filename> task is dependent upon
-                the <filename>do_fetch</filename> task.
-                Execution of the <filename>do_build</filename> task results
-                in the <filename>do_printdate</filename> task running first.
+                The first argument to <filename>addtask</filename>
+                is the name of the function to promote to
+                a task.
+                If the name does not start with "do_", "do_" is
+                implicitly added, which enforces the convention that
+                all task names start with "do_".
+            </para>
+
+            <para>
+                In the previous example, the
+                <filename>do_printdate</filename> task becomes a
+                dependency of the <filename>do_build</filename>
+                task, which is the default task (i.e. the task run by
+                the <filename>bitbake</filename> command unless
+                another task is specified explicitly).
+                Additionally, the <filename>do_printdate</filename>
+                task becomes dependent upon the
+                <filename>do_fetch</filename> task.
+                Running the <filename>do_build</filename> task
+                results in the <filename>do_printdate</filename>
+                task running first.
+                <note>
+                    If you try out the previous example, you might see that
+                    the <filename>do_printdate</filename> task is only run
+                    the first time you build the recipe with
+                    the <filename>bitbake</filename> command.
+                    This is because BitBake considers the task "up-to-date"
+                    after that initial run.
+                    If you want to force the task to always be rerun for
+                    experimentation purposes, you can make BitBake always
+                    consider the task "out-of-date" by using the
+                    <filename>[</filename><link linkend='variable-flags'><filename>nostamp</filename></link><filename>]</filename>
+                    variable flag, as follows:
+                    <literallayout class='monospaced'>
+     do_printdate[nostamp] = "1"
+                    </literallayout>
+                    You can also explicitly run the task and provide the
+                    <filename>-f</filename> option as follows:
+                    <literallayout class='monospaced'>
+     $ bitbake <replaceable>recipe</replaceable> -c printdate -f
+                    </literallayout>
+                    When manually selecting a task to run with the
+                    <filename>bitbake</filename>&nbsp;<replaceable>recipe</replaceable>&nbsp;<filename>-c</filename>&nbsp;<replaceable>task</replaceable>
+                    command, you can omit the "do_" prefix as part of the
+                    task name.
+                </note>
+            </para>
+
+            <para>
+                You might wonder about the practical effects of using
+                <filename>addtask</filename> without specifying any
+                dependencies as is done in the following example:
+                <literallayout class='monospaced'>
+     addtask printdate
+                </literallayout>
+                In this example, assuming dependencies have not been
+                added through some other means, the only way to run
+                the task is by explicitly selecting it with
+                <filename>bitbake</filename>&nbsp;<replaceable>recipe</replaceable>&nbsp;<filename>-c printdate</filename>.
+                You can use the
+                <filename>do_listtasks</filename> task to list all tasks
+                defined in a recipe as shown in the following example:
+                <literallayout class='monospaced'>
+     $ bitbake <replaceable>recipe</replaceable> -c listtasks
+                </literallayout>
+                For more information on task dependencies, see the
+                "<link linkend='dependencies'>Dependencies</link>"
+                section.
+            </para>
+
+            <para>
+                See the
+                "<link linkend='variable-flags'>Variable Flags</link>"
+                section for information on variable flags you can use with
+                tasks.
             </para>
         </section>
 
@@ -1775,14 +1911,23 @@
                      exclusion.
                     </para></listitem>
                 <listitem><para><emphasis><filename>[noexec]</filename>:</emphasis>
-                    Marks the tasks as being empty and no execution required.
-                    The <filename>[noexec]</filename> flag can be used to set up
+                    When set to "1", marks the task as being empty, with
+                    no execution required.
+                    You can use the <filename>[noexec]</filename> flag to set up
                     tasks as dependency placeholders, or to disable tasks defined
                     elsewhere that are not needed in a particular recipe
                     (see the sketch below).
                     </para></listitem>
                 <listitem><para><emphasis><filename>[nostamp]</filename>:</emphasis>
-                    Tells BitBake to not generate a stamp file for a task,
-                    which implies the task should always be executed.
+                    When set to "1", tells BitBake to not generate a stamp
+                    file for a task, which implies the task should always
+                    be executed.
+                    <note><title>Caution</title>
+                        Any task that depends (possibly indirectly) on a
+                        <filename>[nostamp]</filename> task will always be
+                        executed as well.
+                        This can cause unnecessary rebuilding if you are
+                        not careful.
+                    </note>
                     </para></listitem>
                 <listitem><para><emphasis><filename>[postfuncs]</filename>:</emphasis>
                     List of functions to call after the completion of the task.
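                A hedged sketch of setting the flags above on hypothetical
                tasks:
                <literallayout class='monospaced'>
     do_generate_docs[noexec] = "1"
     do_printdate[nostamp] = "1"
     do_compile[postfuncs] += "my_post_check"
                </literallayout>
                Here <filename>my_post_check</filename> would be a function
                defined elsewhere in the recipe or in an inherited class.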
@@ -1875,128 +2020,196 @@
         <title>Events</title>
 
         <para>
-            BitBake allows installation of event handlers within
-            recipe and class files.
-            Events are triggered at certain points during operation,
-            such as the beginning of an operation against a given recipe
-            (<filename>*.bb</filename> file), the start of a given task,
-            task failure, task success, and so forth.
+            BitBake allows installation of event handlers within recipe
+            and class files.
+            Events are triggered at certain points during operation, such
+            as the beginning of operation against a given recipe
+            (i.e. <filename>*.bb</filename>), the start of a given task,
+            a task failure, a task success, and so forth.
             The intent is to make it easy to do things like email
-            notification on build failure.
+            notification on build failures.
         </para>
 
         <para>
-            Following is an example event handler that
-            prints the name of the event and the content of
-            the <filename>FILE</filename> variable:
+            Following is an example event handler that prints the name
+            of the event and the content of the
+            <filename>FILE</filename> variable:
             <literallayout class='monospaced'>
      addhandler myclass_eventhandler
      python myclass_eventhandler() {
          from bb.event import getName
-         from bb import data
          print("The name of the Event is %s" % getName(e))
-         print("The file we run for is %s" % data.getVar('FILE', e.data, True))
+         print("The file we run for is %s" % d.getVar('FILE'))
      }
+     myclass_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
             </literallayout>
-            This event handler gets called every time an event is
-            triggered.
-            A global variable "<filename>e</filename>" is defined and
-            "<filename>e.data</filename>" contains an instance of
-            "<filename>bb.data</filename>".
-            With the <filename>getName(e)</filename> method, one can get
+            In the previous example, an eventmask has been set so that
+            the handler only sees the "BuildStarted" and "BuildCompleted"
+            events.
+            This event handler gets called every time an event matching
+            the eventmask is triggered.
+            A global variable "e" is defined, which represents the current
+            event.
+            With the <filename>getName(e)</filename> method, you can get
             the name of the triggered event.
+            The global datastore is available as "d".
+            In legacy code, you might see "e.data" used to get the datastore.
+            However, realize that "e.data" is deprecated and you should use
+            "d" going forward.
         </para>
 
         <para>
-            Because you probably are only interested in a subset of events,
-            you would likely use the <filename>[eventmask]</filename> flag
-            for your event handler to be sure that only certain events
-            trigger the handler.
-            Given the previous example, suppose you only wanted the
-            <filename>bb.build.TaskFailed</filename> event to trigger that
-            event handler.
-            Use the flag as follows:
-            <literallayout class='monospaced'>
-     addhandler myclass_eventhandler
-     myclass_eventhandler[eventmask] = "bb.build.TaskFailed"
-     python myclass_eventhandler() {
-         from bb.event import getName
-         from bb import data
-         print("The name of the Event is %s" % getName(e))
-         print("The file we run for is %s" % data.getVar('FILE', e.data, True))
-     }
-            </literallayout>
+            The context of the datastore is appropriate to the event
+            in question.
+            For example, "BuildStarted" and "BuildCompleted" events run
+            before any tasks are executed and so use the global
+            configuration datastore namespace.
+            No recipe-specific metadata exists in that namespace.
+            The "BuildStarted" and "BuildCompleted" events also run in
+            the main cooker/server process rather than any worker context.
+            Thus, any changes made to the datastore would be seen by other
+            cooker/server events within the current build but not seen
+            outside of that build or in any worker context.
+            Task events run in the actual tasks in question and
+            consequently have recipe-specific and task-specific contents.
+            These events run in the worker context and are discarded at
+            the end of task execution.
         </para>
 
         <para>
-            During a standard build, the following common events might occur:
+            During a standard build, the following common events might
+            occur.
+            These are the kinds of events that most metadata might have
+            an interest in viewing:
             <itemizedlist>
                 <listitem><para>
-                    <filename>bb.event.ConfigParsed()</filename>
+                    <filename>bb.event.ConfigParsed()</filename>:
+                    Fired when the base configuration (which consists of
+                    <filename>bitbake.conf</filename>,
+                    <filename>base.bbclass</filename> and any global
+                    <filename>INHERIT</filename> statements) has been parsed.
+                    You can see multiple such events when each of the
+                    workers parse the base configuration or if the server
+                    changes configuration and reparses.
+                    Any given datastore only has one such event executed
+                    against it, however.
+                    If
+                    <link linkend='var-BB_INVALIDCONF'><filename>BB_INVALIDCONF</filename></link>
+                    is set in the datastore by the event handler, the
+                    configuration is reparsed and a new event triggered,
+                    allowing the metadata to update configuration.
+                    </para></listitem>
+                <listitem><para>
+                    <filename>bb.event.HeartbeatEvent()</filename>:
+                    Fires at regular time intervals (one second by default).
+                    You can configure the interval time using the
+                    <filename>BB_HEARTBEAT_EVENT</filename> variable.
+                    The event's "time" attribute is the
+                    <filename>time.time()</filename> value when the
+                    event is triggered.
+                    This event is useful for activities such as
+                    system state monitoring (see the sketch after this list).
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.ParseStarted()</filename>
+                    <filename>bb.event.ParseStarted()</filename>:
+                    Fired when BitBake is about to start parsing recipes.
+                    This event's "total" attribute represents the number of
+                    recipes BitBake plans to parse.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.ParseProgress()</filename>
+                    <filename>bb.event.ParseProgress()</filename>:
+                    Fired as parsing progresses.
+                    This event's "current" attribute is the number of
+                    This event's "current" attribute is the number of
+                    recipes parsed so far; the "total" attribute is the
+                    same as for "ParseStarted".
                 <listitem><para>
-                    <filename>bb.event.ParseCompleted()</filename>
+                    <filename>bb.event.ParseCompleted()</filename>:
+                    Fired when parsing is complete.
+                    This event's "cached", "parsed", "skipped", "virtuals",
+                    "masked", and "errors" attributes provide statistics
+                    for the parsing results.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.BuildStarted()</filename>
+                    <filename>bb.event.BuildStarted()</filename>:
+                    Fired when a new build starts.
+                    BitBake fires multiple "BuildStarted" events (one per configuration)
+                    when multiple configuration (multiconfig) is enabled.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.build.TaskStarted()</filename>
+                    <filename>bb.build.TaskStarted()</filename>:
+                    Fired when a task starts.
+                    This event's "taskfile" attribute points to the recipe
+                    from which the task originates.
+                    The "taskname" attribute, which is the task's name,
+                    includes the <filename>do_</filename> prefix, and the
+                    "logfile" attribute points to where the task's output is
+                    stored.
+                    Finally, the "time" attribute is the task's execution start
+                    time.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.build.TaskInvalid()</filename>
+                    <filename>bb.build.TaskInvalid()</filename>:
+                    Fired if BitBake tries to execute a task that does not exist.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.build.TaskFailedSilent()</filename>
+                    <filename>bb.build.TaskFailedSilent()</filename>:
+                    Fired for setscene tasks that fail and should not be
+                    presented to the user verbosely.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.build.TaskFailed()</filename>
+                    <filename>bb.build.TaskFailed()</filename>:
+                    Fired for normal tasks that fail.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.build.TaskSucceeded()</filename>
+                    <filename>bb.build.TaskSucceeded()</filename>:
+                    Fired when a task successfully completes.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.BuildCompleted()</filename>
+                    <filename>bb.event.BuildCompleted()</filename>:
+                    Fired when a build finishes.
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.cooker.CookerExit()</filename>
+                    <filename>bb.cooker.CookerExit()</filename>:
+                    Fired when the BitBake server/cooker shuts down.
+                    This event is usually only seen by the UIs as a
+                    sign they should also shut down.
                     </para></listitem>
             </itemizedlist>
-            Here is a list of other events that occur based on specific requests
-            to the server:
+        </para>
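+        <para>
+            To make the "HeartbeatEvent" entry above concrete, here is a
+            hedged sketch of a handler; the handler name is hypothetical,
+            and <filename>BB_HEARTBEAT_EVENT</filename> would normally be
+            set in a configuration file:
+            <literallayout class='monospaced'>
+     BB_HEARTBEAT_EVENT = "10"
+
+     addhandler monitor_heartbeat
+     monitor_heartbeat[eventmask] = "bb.event.HeartbeatEvent"
+     python monitor_heartbeat() {
+         # "e" is the current event; its "time" attribute is a time.time() value
+         bb.note("Heartbeat at %s" % e.time)
+     }
+            </literallayout>
+        </para>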
+
+        <para>
+            The events in this next list occur based on specific
+            requests to the server.
+            These events are often used to communicate larger pieces of
+            information from the BitBake server to other parts of
+            BitBake such as user interfaces:
             <itemizedlist>
                 <listitem><para>
                     <filename>bb.event.TreeDataPreparationStarted()</filename>
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.TreeDataPreparationProgress</filename>
+                    <filename>bb.event.TreeDataPreparationProgress()</filename>
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.TreeDataPreparationCompleted</filename>
+                    <filename>bb.event.TreeDataPreparationCompleted()</filename>
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.DepTreeGenerated</filename>
+                    <filename>bb.event.DepTreeGenerated()</filename>
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.CoreBaseFilesFound</filename>
+                    <filename>bb.event.CoreBaseFilesFound()</filename>
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.ConfigFilePathFound</filename>
+                    <filename>bb.event.ConfigFilePathFound()</filename>
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.FilesMatchingFound</filename>
+                    <filename>bb.event.FilesMatchingFound()</filename>
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.ConfigFilesFound</filename>
+                    <filename>bb.event.ConfigFilesFound()</filename>
                     </para></listitem>
                 <listitem><para>
-                    <filename>bb.event.TargetsTreeGenerated</filename>
+                    <filename>bb.event.TargetsTreeGenerated()</filename>
                     </para></listitem>
             </itemizedlist>
         </para>
@@ -2317,7 +2530,8 @@
                             <row>
                                 <entry align="left"><filename>d.getVar("X", expand)</filename></entry>
                                 <entry align="left">Returns the value of variable "X".
-                                Using "expand=True" expands the value.</entry>
+                                    Using "expand=True" expands the value.
+                                    Returns "None" if the variable "X" does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.setVar("X", "value")</filename></entry>
@@ -2325,24 +2539,32 @@
                             </row>
                             <row>
                                 <entry align="left"><filename>d.appendVar("X", "value")</filename></entry>
-                                <entry align="left">Adds "value" to the end of the variable "X".</entry>
+                                <entry align="left">Adds "value" to the end of the variable "X".
+                                    Acts like <filename>d.setVar("X", "value")</filename>
+                                    if the variable "X" does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.prependVar("X", "value")</filename></entry>
-                                <entry align="left">Adds "value" to the start of the variable "X".</entry>
+                                <entry align="left">Adds "value" to the start of the variable "X".
+                                    Acts like <filename>d.setVar("X", "value")</filename>
+                                    if the variable "X" does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.delVar("X")</filename></entry>
-                                <entry align="left">Deletes the variable "X" from the datastore.</entry>
+                                <entry align="left">Deletes the variable "X" from the datastore.
+                                    Does nothing if the variable "X" does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.renameVar("X", "Y")</filename></entry>
-                                <entry align="left">Renames the variable "X" to "Y".</entry>
+                                <entry align="left">Renames the variable "X" to "Y".
+                                    Does nothing if the variable "X" does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.getVarFlag("X", flag, expand)</filename></entry>
                                 <entry align="left">Returns the value of the named flag on the variable "X".
-                                    Using "expand=True" expands the value.</entry>
+                                    Using "expand=True" expands the value.
+                                    Returns "None" if either the variable "X" or the named flag
+                                    does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.setVarFlag("X", flag, "value")</filename></entry>
@@ -2351,12 +2573,16 @@
                             <row>
                                 <entry align="left"><filename>d.appendVarFlag("X", flag, "value")</filename></entry>
                                 <entry align="left">Appends "value" to the named flag on the
-                                variable "X".</entry>
+                                    variable "X".
+                                    Acts like <filename>d.setVarFlag("X", flag, "value")</filename>
+                                    if the named flag does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.prependVarFlag("X", flag, "value")</filename></entry>
                                 <entry align="left">Prepends "value" to the named flag on
-                                   the variable "X".</entry>
+                                    the variable "X".
+                                    Acts like <filename>d.setVarFlag("X", flag, "value")</filename>
+                                    if the named flag does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.delVarFlag("X", flag)</filename></entry>
@@ -2372,16 +2598,23 @@
                             </row>
                             <row>
                                 <entry align="left"><filename>d.getVarFlags("X")</filename></entry>
-                                <entry align="left">Returns a <filename>flagsdict</filename> of the flags for
-                                    the variable "X".</entry>
+                                <entry align="left">Returns a <filename>flagsdict</filename>
+                                    of the flags for the variable "X".
+                                    Returns "None" if the variable "X" does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.delVarFlags("X")</filename></entry>
-                                <entry align="left">Deletes all the flags for the variable "X".</entry>
+                                <entry align="left">Deletes all the flags for the variable "X".
+                                    Does nothing if the variable "X" does not exist.</entry>
                             </row>
                             <row>
                                 <entry align="left"><filename>d.expand(expression)</filename></entry>
-                                <entry align="left">Expands variable references in the specified string expression.</entry>
+                                <entry align="left">Expands variable references in the specified
+                                    string expression.
+                                    References to variables that do not exist are left as is.
+                                    For example, <filename>d.expand("foo ${X}")</filename>
+                                    expands to the literal string "foo ${X}" if the
+                                    variable "X" does not exist.</entry>
                             </row>
                         </tbody>
                     </tgroup>
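                A hedged sketch exercising several of these operations inside
                an anonymous Python function; the variable names are
                hypothetical:
                <literallayout class='monospaced'>
     python () {
         d.setVar("X", "foo")
         d.appendVar("X", " bar")                # X is now "foo bar"
         bb.note(d.expand("X expands to ${X}"))  # "X expands to foo bar"
         bb.note(str(d.getVar("NOSUCHVAR")))     # "None": variable does not exist
         bb.note(d.expand("${NOSUCHVAR}"))       # left as-is: "${NOSUCHVAR}"
     }
                </literallayout>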
@@ -2445,7 +2678,7 @@
                     the "setscene" part of the task's execution in order
                     to validate the list of task hashes.
                     </para></listitem>
-                <listitem><para><filename>BB_SETSCENE_VERIFY_FUNCTION</filename>
+                <listitem><para><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename>
                     Specifies a function to call that verifies the list of
                     planned task execution before the main task execution
                     happens.

+ 15 - 25
bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.xml

@@ -716,7 +716,7 @@
             </glossdef>
         </glossentry>
 
-        <glossentry id='var-BB_SETSCENE_VERIFY_FUNCTION'><glossterm>BB_SETSCENE_VERIFY_FUNCTION</glossterm>
+        <glossentry id='var-BB_SETSCENE_VERIFY_FUNCTION2'><glossterm>BB_SETSCENE_VERIFY_FUNCTION2</glossterm>
             <glossdef>
                 <para>
                     Specifies a function to call that verifies the list of
@@ -1143,8 +1143,6 @@
             <glossdef>
                 <para>
                     Sets the base location where layers are stored.
-                    By default, this location is set to
-                    <filename>${COREBASE}</filename>.
                     This setting is used in conjunction with
                     <filename>bitbake-layers layerindex-fetch</filename> and
                     tells <filename>bitbake-layers</filename> where to place
@@ -1539,24 +1537,6 @@
             </glossdef>
         </glossentry>
 
-        <glossentry id='var-FILESDIR'><glossterm>FILESDIR</glossterm>
-            <glossdef>
-                <para>
-                    Specifies directories BitBake uses when searching for
-                    patches and files.
-                    The "local" fetcher module uses these directories when
-                    handling <filename>file://</filename> URLs if the file
-                    was not found using
-                    <link linkend='var-FILESPATH'><filename>FILESPATH</filename></link>.
-                    <note>
-                        The <filename>FILESDIR</filename> variable is
-                        deprecated and you should use
-                        <filename>FILESPATH</filename> in all new code.
-                    </note>
-                </para>
-            </glossdef>
-        </glossentry>
-
         <glossentry id='var-FILESPATH'><glossterm>FILESPATH</glossterm>
             <glossdef>
                 <para>
@@ -1614,9 +1594,19 @@
         <glossentry id='var-INHERIT'><glossterm>INHERIT</glossterm>
             <glossdef>
                 <para>
-                    Causes the named class to be inherited at
-                    this point during parsing.
-                    The variable is only valid in configuration files.
+                    Causes the named class or classes to be inherited globally.
+                    Anonymous functions in the class or classes
+                    are not executed for the
+                    base configuration and in each individual recipe.
+                    The OpenEmbedded build system ignores changes to
+                    <filename>INHERIT</filename> in individual recipes.
+                </para>
+
+                <para>
+                    For more information on <filename>INHERIT</filename>, see
+                    the
+                    "<link linkend="inherit-configuration-directive"><filename>INHERIT</filename> Configuration Directive</link>"
+                    section.
                 </para>
             </glossdef>
         </glossentry>
@@ -1911,7 +1901,7 @@
                     Here are two examples:
                     <literallayout class='monospaced'>
      PREFERRED_VERSION_python = "2.7.3"
-     PREFERRED_VERSION_linux-yocto = "3.10%"
+     PREFERRED_VERSION_linux-yocto = "4.12%"
                     </literallayout>
                 </para>
             </glossdef>

+ 1 - 1
bitbake/doc/bitbake-user-manual/bitbake-user-manual.xml

@@ -56,7 +56,7 @@
 -->
 
         <copyright>
-            <year>2004-2016</year>
+            <year>2004-2017</year>
             <holder>Richard Purdie</holder>
             <holder>Chris Larson</holder>
             <holder>and Phil Blundell</holder>

+ 1 - 1
bitbake/doc/bitbake.1

@@ -105,7 +105,7 @@ Show debug logging for the specified logging domains
 profile the command and print a report
 .TP
 .B \-uUI, \-\-ui=UI
-User interface to use. Currently, hob, depexp, goggle or ncurses can be specified as UI.
+User interface to use. Currently, knotty, taskexp or ncurses can be specified as UI.
 .TP
 .B \-tSERVERTYPE, \-\-servertype=SERVERTYPE
 Choose which server to use, none, process or xmlrpc.

+ 13 - 13
bitbake/lib/bb/COW.py

@@ -3,7 +3,7 @@
 #
 # This is a copy on write dictionary and set which abuses classes to try and be nice and fast.
 #
-# Copyright (C) 2006 Tim Amsell
+# Copyright (C) 2006 Tim Ansell
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 as
@@ -213,11 +213,11 @@ if __name__ == "__main__":
     print()
 
     print("a", a)
-    for x in a.items():
+    for x in a.iteritems():
         print(x)
     print("--")
     print("b", b)
-    for x in b.items():
+    for x in b.iteritems():
         print(x)
     print()
 
@@ -225,11 +225,11 @@ if __name__ == "__main__":
     b['a'] = 'c'
 
     print("a", a)
-    for x in a.items():
+    for x in a.iteritems():
         print(x)
     print("--")
     print("b", b)
-    for x in b.items():
+    for x in b.iteritems():
         print(x)
     print()
 
@@ -244,22 +244,22 @@ if __name__ == "__main__":
     a['set'].add("o2")
 
     print("a", a)
-    for x in a['set'].values():
+    for x in a['set'].itervalues():
         print(x)
     print("--")
     print("b", b)
-    for x in b['set'].values():
+    for x in b['set'].itervalues():
         print(x)
     print()
 
     b['set'].add('o3')
 
     print("a", a)
-    for x in a['set'].values():
+    for x in a['set'].itervalues():
         print(x)
     print("--")
     print("b", b)
-    for x in b['set'].values():
+    for x in b['set'].itervalues():
         print(x)
     print()
 
@@ -269,7 +269,7 @@ if __name__ == "__main__":
     a['set2'].add("o2")
 
     print("a", a)
-    for x in a.items():
+    for x in a.iteritems():
         print(x)
     print("--")
     print("b", b)
@@ -289,7 +289,7 @@ if __name__ == "__main__":
         print("Yay - has_key with delete works!")
 
     print("a", a)
-    for x in a.items():
+    for x in a.iteritems():
         print(x)
     print("--")
     print("b", b)
@@ -300,7 +300,7 @@ if __name__ == "__main__":
     b.__revertitem__('b')
 
     print("a", a)
-    for x in a.items():
+    for x in a.iteritems():
         print(x)
     print("--")
     print("b", b)
@@ -310,7 +310,7 @@ if __name__ == "__main__":
 
     b.__revertitem__('dict')
     print("a", a)
-    for x in a.items():
+    for x in a.iteritems():
         print(x)
     print("--")
     print("b", b)

+ 1 - 1
bitbake/lib/bb/__init__.py

@@ -21,7 +21,7 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-__version__ = "1.31.2"
+__version__ = "1.37.0"
 
 import sys
 if sys.version_info < (3, 4, 0):

+ 77 - 26
bitbake/lib/bb/build.py

@@ -91,13 +91,14 @@ class TaskBase(event.Event):
 
     def __init__(self, t, logfile, d):
         self._task = t
-        self._package = d.getVar("PF", True)
-        self.taskfile = d.getVar("FILE", True)
+        self._package = d.getVar("PF")
+        self._mc = d.getVar("BB_CURRENT_MC")
+        self.taskfile = d.getVar("FILE")
         self.taskname = self._task
         self.logfile = logfile
         self.time = time.time()
         event.Event.__init__(self)
-        self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())
+        self._message = "recipe %s: task %s: %s" % (d.getVar("PF"), t, self.getDisplayName())
 
     def getTask(self):
         return self._task
@@ -194,13 +195,13 @@ def exec_func(func, d, dirs = None, pythonexception=False):
         oldcwd = None
 
     flags = d.getVarFlags(func)
-    cleandirs = flags.get('cleandirs')
+    cleandirs = flags.get('cleandirs') if flags else None
     if cleandirs:
         for cdir in d.expand(cleandirs).split():
             bb.utils.remove(cdir, True)
             bb.utils.mkdirhier(cdir)
 
-    if dirs is None:
+    if flags and dirs is None:
         dirs = flags.get('dirs')
         if dirs:
             dirs = d.expand(dirs).split()
@@ -226,17 +227,17 @@ def exec_func(func, d, dirs = None, pythonexception=False):
     else:
         lockfiles = None
 
-    tempdir = d.getVar('T', True)
+    tempdir = d.getVar('T')
 
     # or func allows items to be executed outside of the normal
     # task set, such as buildhistory
-    task = d.getVar('BB_RUNTASK', True) or func
+    task = d.getVar('BB_RUNTASK') or func
     if task == func:
         taskfunc = task
     else:
         taskfunc = "%s.%s" % (task, func)
 
-    runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
+    runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
     runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
     runfile = os.path.join(tempdir, runfn)
     bb.utils.mkdirhier(os.path.dirname(runfile))
@@ -368,7 +369,7 @@ exit $ret
 
     cmd = runfile
     if d.getVarFlag(func, 'fakeroot', False):
-        fakerootcmd = d.getVar('FAKEROOT', True)
+        fakerootcmd = d.getVar('FAKEROOT')
         if fakerootcmd:
             cmd = [fakerootcmd, runfile]
 
@@ -377,7 +378,7 @@ exit $ret
     else:
         logfile = sys.stdout
 
-    progress = d.getVarFlag(func, 'progress', True)
+    progress = d.getVarFlag(func, 'progress')
     if progress:
         if progress == 'percent':
             # Use default regex
@@ -429,7 +430,7 @@ exit $ret
             else:
                 break
 
-    tempdir = d.getVar('T', True)
+    tempdir = d.getVar('T')
     fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid())
     if os.path.exists(fifopath):
         os.unlink(fifopath)
@@ -442,7 +443,7 @@ exit $ret
                 with open(os.devnull, 'r+') as stdin:
                     bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)])
             except bb.process.CmdError:
-                logfn = d.getVar('BB_LOGFILE', True)
+                logfn = d.getVar('BB_LOGFILE')
                 raise FuncFailed(func, logfn)
         finally:
             os.unlink(fifopath)
@@ -473,18 +474,18 @@ def _exec_task(fn, task, d, quieterr):
     logger.debug(1, "Executing task %s", task)
 
     localdata = _task_data(fn, task, d)
-    tempdir = localdata.getVar('T', True)
+    tempdir = localdata.getVar('T')
     if not tempdir:
         bb.fatal("T variable not set, unable to build")
 
     # Change nice level if we're asked to
-    nice = localdata.getVar("BB_TASK_NICE_LEVEL", True)
+    nice = localdata.getVar("BB_TASK_NICE_LEVEL")
     if nice:
         curnice = os.nice(0)
         nice = int(nice) - curnice
         newnice = os.nice(nice)
         logger.debug(1, "Renice to %s " % newnice)
-    ionice = localdata.getVar("BB_TASK_IONICE_LEVEL", True)
+    ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
     if ionice:
         try:
             cls, prio = ionice.split(".", 1)
@@ -495,7 +496,7 @@ def _exec_task(fn, task, d, quieterr):
     bb.utils.mkdirhier(tempdir)
 
     # Determine the logfile to generate
-    logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
+    logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}'
     logbase = logfmt.format(task=task, pid=os.getpid())
 
     # Document the order of the tasks...
@@ -562,6 +563,7 @@ def _exec_task(fn, task, d, quieterr):
 
     localdata.setVar('BB_LOGFILE', logfn)
     localdata.setVar('BB_RUNTASK', task)
+    localdata.setVar('BB_TASK_LOGGER', bblogger)
 
     flags = localdata.getVarFlags(task)
 
@@ -627,7 +629,7 @@ def exec_task(fn, task, d, profile = False):
             quieterr = True
 
         if profile:
-            profname = "profile-%s.log" % (d.getVar("PN", True) + "-" + task)
+            profname = "profile-%s.log" % (d.getVar("PN") + "-" + task)
             try:
                 import cProfile as profile
             except:
@@ -667,9 +669,9 @@ def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
         stamp = d.stamp[file_name]
         extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
     else:
-        stamp = d.getVar('STAMP', True)
-        file_name = d.getVar('BB_FILENAME', True)
-        extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
+        stamp = d.getVar('STAMP')
+        file_name = d.getVar('BB_FILENAME')
+        extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
 
     if baseonly:
         return stamp
@@ -703,9 +705,9 @@ def stamp_cleanmask_internal(taskname, d, file_name):
         stamp = d.stampclean[file_name]
         extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
     else:
-        stamp = d.getVar('STAMPCLEAN', True)
-        file_name = d.getVar('BB_FILENAME', True)
-        extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
+        stamp = d.getVar('STAMPCLEAN')
+        file_name = d.getVar('BB_FILENAME')
+        extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
 
     if not stamp:
         return []
@@ -723,7 +725,7 @@ def make_stamp(task, d, file_name = None):
     for mask in cleanmask:
         for name in glob.glob(mask):
             # Preserve sigdata files in the stamps directory
-            if "sigdata" in name:
+            if "sigdata" in name or "sigbasedata" in name:
                 continue
             # Preserve taint files in the stamps directory
             if name.endswith('.taint'):
@@ -741,7 +743,7 @@ def make_stamp(task, d, file_name = None):
     # as it completes
     if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
         stampbase = stamp_internal(task, d, None, True)
-        file_name = d.getVar('BB_FILENAME', True)
+        file_name = d.getVar('BB_FILENAME')
         bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)
 
 def del_stamp(task, d, file_name = None):
@@ -763,7 +765,7 @@ def write_taint(task, d, file_name = None):
     if file_name:
         taintfn = d.stamp[file_name] + '.' + task + '.taint'
     else:
-        taintfn = d.getVar('STAMP', True) + '.' + task + '.taint'
+        taintfn = d.getVar('STAMP') + '.' + task + '.taint'
     bb.utils.mkdirhier(os.path.dirname(taintfn))
     # The specific content of the taint file is not really important,
     # we just need it to be random, so a random UUID is used
@@ -860,3 +862,52 @@ def deltask(task, d):
         if task in deps:
             deps.remove(task)
             d.setVarFlag(bbtask, 'deps', deps)
+
+def preceedtask(task, with_recrdeptasks, d):
+    """
+    Returns a set of tasks in the current recipe which were specified as
+    precondition by the task itself ("after") or which listed themselves
+    as precondition ("before"). Preceeding tasks specified via the
+    "recrdeptask" are included in the result only if requested. Beware
+    that this may lead to the task itself being listed.
+    """
+    preceed = set()
+
+    # Ignore tasks which don't exist
+    tasks = d.getVar('__BBTASKS', False)
+    if task not in tasks:
+        return preceed
+
+    preceed.update(d.getVarFlag(task, 'deps') or [])
+    if with_recrdeptasks:
+        recrdeptask = d.getVarFlag(task, 'recrdeptask')
+        if recrdeptask:
+            preceed.update(recrdeptask.split())
+    return preceed
+
+def tasksbetween(task_start, task_end, d):
+    """
+    Return the list of tasks between two tasks in the current recipe,
+    where task_start is to start at and task_end is the task to end at
+    (and task_end has a dependency chain back to task_start).
+    """
+    outtasks = []
+    tasks = list(filter(lambda k: d.getVarFlag(k, "task"), d.keys()))
+    def follow_chain(task, endtask, chain=None):
+        if not chain:
+            chain = []
+        chain.append(task)
+        for othertask in tasks:
+            if othertask == task:
+                continue
+            if task == endtask:
+                for ctask in chain:
+                    if ctask not in outtasks:
+                        outtasks.append(ctask)
+            else:
+                deps = d.getVarFlag(othertask, 'deps', False)
+                if task in deps:
+                    follow_chain(othertask, endtask, chain)
+        chain.pop()
+    follow_chain(task_start, task_end)
+    return outtasks
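
The new tasksbetween() helper above walks forward from task_start along the 'deps' flags, collecting every chain that reaches task_end. Here is a self-contained sketch of the same chain-following idea, using a plain dict in place of the datastore (the task names and dependency table are made up):

    deps = {
        "do_fetch":   [],
        "do_unpack":  ["do_fetch"],
        "do_compile": ["do_unpack"],
        "do_install": ["do_compile"],
    }

    def tasksbetween(task_start, task_end):
        outtasks = []
        def follow_chain(task, chain):
            chain.append(task)
            if task == task_end:
                # Reached the end task: everything on the current chain qualifies.
                for ctask in chain:
                    if ctask not in outtasks:
                        outtasks.append(ctask)
            else:
                # Recurse into every task that depends directly on this one.
                for other, odeps in deps.items():
                    if other != task and task in odeps:
                        follow_chain(other, chain)
            chain.pop()
        follow_chain(task_start, [])
        return outtasks

    print(tasksbetween("do_unpack", "do_install"))
    # -> ['do_unpack', 'do_compile', 'do_install']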

+ 25 - 22
bitbake/lib/bb/cache.py

@@ -37,7 +37,7 @@ import bb.utils
 
 logger = logging.getLogger("BitBake.Cache")
 
-__cache_version__ = "150"
+__cache_version__ = "151"
 
 def getCacheFile(path, filename, data_hash):
     return os.path.join(path, filename + "." + data_hash)
@@ -71,7 +71,7 @@ class RecipeInfoCommon(object):
 
     @classmethod
     def flaglist(cls, flag, varlist, metadata, squash=False):
-        out_dict = dict((var, metadata.getVarFlag(var, flag, True))
+        out_dict = dict((var, metadata.getVarFlag(var, flag))
                     for var in varlist)
         if squash:
             return dict((k,v) for (k,v) in out_dict.items() if v)
@@ -86,9 +86,9 @@ class RecipeInfoCommon(object):
 class CoreRecipeInfo(RecipeInfoCommon):
     __slots__ = ()
 
-    cachefile = "bb_cache.dat"   
+    cachefile = "bb_cache.dat"
 
-    def __init__(self, filename, metadata):      
+    def __init__(self, filename, metadata):
         self.file_depends = metadata.getVar('__depends', False)
         self.timestamp = bb.parse.cached_mtime(filename)
         self.variants = self.listvar('__VARIANTS', metadata) + ['']
@@ -107,7 +107,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
 
         self.pn = self.getvar('PN', metadata)
         self.packages = self.listvar('PACKAGES', metadata)
-        if not self.pn in self.packages:
+        if not self.packages:
             self.packages.append(self.pn)
 
         self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
@@ -122,7 +122,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
         self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
         self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
         self.stamp = self.getvar('STAMP', metadata)
-        self.stampclean = self.getvar('STAMPCLEAN', metadata)        
+        self.stampclean = self.getvar('STAMPCLEAN', metadata)
         self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
         self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
         self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
@@ -217,7 +217,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
             cachedata.packages_dynamic[package].append(fn)
 
         # Build hash of runtime depends and recommends
-        for package in self.packages + [self.pn]:
+        for package in self.packages:
             cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
             cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]
 
@@ -296,7 +296,7 @@ def parse_recipe(bb_data, bbfile, appends, mc=''):
     bb_data.setVar("__BBMULTICONFIG", mc)
 
     # expand tmpdir to include this topdir
-    bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR', True) or "")
+    bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR') or "")
     bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
     oldpath = os.path.abspath(os.getcwd())
     bb.parse.cached_mtime_noerror(bbfile_loc)
@@ -375,10 +375,10 @@ class Cache(NoCache):
         data = databuilder.data
 
         # Pass caches_array information into Cache Constructor
-        # It will be used later for deciding whether we 
-        # need extra cache file dump/load support 
+        # It will be used later for deciding whether we
+        # need extra cache file dump/load support
         self.caches_array = caches_array
-        self.cachedir = data.getVar("CACHE", True)
+        self.cachedir = data.getVar("CACHE")
         self.clean = set()
         self.checked = set()
         self.depends_cache = {}
@@ -421,7 +421,7 @@ class Cache(NoCache):
                 cachesize += os.fstat(cachefile.fileno()).st_size
 
         bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)
-        
+
         for cache_class in self.caches_array:
             cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
             with open(cachefile, "rb") as cachefile:
@@ -438,8 +438,8 @@ class Cache(NoCache):
                     logger.info('Cache version mismatch, rebuilding...')
                     return
                 elif bitbake_ver != bb.__version__:
-                     logger.info('Bitbake version mismatch, rebuilding...')
-                     return
+                    logger.info('Bitbake version mismatch, rebuilding...')
+                    return
 
                 # Load the rest of the cache file
                 current_progress = 0
@@ -462,6 +462,10 @@ class Cache(NoCache):
                         self.depends_cache[key] = [value]
                     # only fire events on even percentage boundaries
                     current_progress = cachefile.tell() + previous_progress
+                    if current_progress > cachesize:
+                        # we might have calculated incorrect total size because a file
+                        # might've been written out just after we checked its size
+                        cachesize = current_progress
                     current_percent = 100 * current_progress / cachesize
                     if current_percent > previous_percent:
                         previous_percent = current_percent
@@ -612,13 +616,13 @@ class Cache(NoCache):
                     a = fl.find(":True")
                     b = fl.find(":False")
                     if ((a < 0) and b) or ((b > 0) and (b < a)):
-                       f = fl[:b+6]
-                       fl = fl[b+7:]
+                        f = fl[:b+6]
+                        fl = fl[b+7:]
                     elif ((b < 0) and a) or ((a > 0) and (a < b)):
-                       f = fl[:a+5]
-                       fl = fl[a+6:]
+                        f = fl[:a+5]
+                        fl = fl[a+6:]
                     else:
-                       break
+                        break
                     fl = fl.strip()
                     if "*" in f:
                         continue
@@ -792,8 +796,8 @@ class MultiProcessCache(object):
         self.cachedata_extras = self.create_cachedata()
 
     def init_cache(self, d, cache_file_name=None):
-        cachedir = (d.getVar("PERSISTENT_DIR", True) or
-                    d.getVar("CACHE", True))
+        cachedir = (d.getVar("PERSISTENT_DIR") or
+                    d.getVar("CACHE"))
         if cachedir in [None, '']:
             return
         bb.utils.mkdirhier(cachedir)
@@ -882,4 +886,3 @@ class MultiProcessCache(object):
             p.dump([data, self.__class__.CACHE_VERSION])
 
         bb.utils.unlockfile(glf)
-
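
The progress fix in the load loop above guards against a cache file that grew after its size was sampled, which would otherwise report more than 100%. A minimal sketch of the clamping arithmetic (the function name is mine):

    def progress_percent(current_progress, cachesize):
        if current_progress > cachesize:
            # The file was written to after we measured it; grow the total
            # so the percentage can never exceed 100.
            cachesize = current_progress
        return 100 * current_progress / cachesize, cachesize

    print(progress_percent(50, 200))    # -> (25.0, 200)
    print(progress_percent(250, 200))   # -> (100.0, 250), not 125%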

+ 50 - 16
bitbake/lib/bb/codeparser.py

@@ -1,3 +1,22 @@
+"""
+BitBake code parser
+
+Parses actual code (i.e. python and shell) for functions and in-line
+expressions. Used mainly to determine dependencies on other functions
+and variables within the BitBake metadata. Also provides a cache for
+this information in order to speed up processing.
+
+(Not to be confused with the code that parses the metadata itself,
+see lib/bb/parse/ for that).
+
+NOTE: if you change how the parsers gather information you will almost
+certainly need to increment CodeParserCache.CACHE_VERSION below so that
+any existing codeparser cache gets invalidated. Additionally you'll need
+to increment __cache_version__ in cache.py in order to ensure that old
+recipe caches don't trigger "Taskhash mismatch" errors.
+
+"""
+
 import ast
 import sys
 import codegen
@@ -117,7 +136,11 @@ class shellCacheLine(object):
 
 class CodeParserCache(MultiProcessCache):
     cache_file_name = "bb_codeparser.dat"
-    CACHE_VERSION = 8
+    # NOTE: you must increment this if you change how the parsers gather information,
+    # so that an existing cache gets invalidated. Additionally you'll need
+    # to increment __cache_version__ in cache.py in order to ensure that old
+    # recipe caches don't trigger "Taskhash mismatch" errors.
+    CACHE_VERSION = 9
 
     def __init__(self):
         MultiProcessCache.__init__(self)
@@ -186,13 +209,15 @@ class BufferedLogger(Logger):
 
     def flush(self):
         for record in self.buffer:
-            self.target.handle(record)
+            if self.target.isEnabledFor(record.levelno):
+                self.target.handle(record)
         self.buffer = []
 
 class PythonParser():
     getvars = (".getVar", ".appendVar", ".prependVar")
     getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag")
-    containsfuncs = ("bb.utils.contains", "base_contains", "bb.utils.contains_any")
+    containsfuncs = ("bb.utils.contains", "base_contains")
+    containsanyfuncs = ("bb.utils.contains_any",  "bb.utils.filter")
     execfuncs = ("bb.build.exec_func", "bb.build.exec_task")
 
     def warn(self, func, arg):
@@ -211,13 +236,17 @@ class PythonParser():
 
     def visit_Call(self, node):
         name = self.called_node_name(node.func)
-        if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs):
+        if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
             if isinstance(node.args[0], ast.Str):
                 varname = node.args[0].s
                 if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
                     if varname not in self.contains:
                         self.contains[varname] = set()
                     self.contains[varname].add(node.args[1].s)
+                elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
+                    if varname not in self.contains:
+                        self.contains[varname] = set()
+                    self.contains[varname].update(node.args[1].s.split())
                 elif name.endswith(self.getvarflags):
                     if isinstance(node.args[1], ast.Str):
                         self.references.add('%s[%s]' % (varname, node.args[1].s))
@@ -342,8 +371,7 @@ class ShellParser():
         except pyshlex.NeedMore:
             raise sherrors.ShellSyntaxError("Unexpected EOF")
 
-        for token in tokens:
-            self.process_tokens(token)
+        self.process_tokens(tokens)
 
     def process_tokens(self, tokens):
         """Process a supplied portion of the syntax tree as returned by
@@ -389,18 +417,24 @@ class ShellParser():
             "case_clause": case_clause,
         }
 
-        for token in tokens:
-            name, value = token
-            try:
-                more_tokens, words = token_handlers[name](value)
-            except KeyError:
-                raise NotImplementedError("Unsupported token type " + name)
+        def process_token_list(tokens):
+            for token in tokens:
+                if isinstance(token, list):
+                    process_token_list(token)
+                    continue
+                name, value = token
+                try:
+                    more_tokens, words = token_handlers[name](value)
+                except KeyError:
+                    raise NotImplementedError("Unsupported token type " + name)
+
+                if more_tokens:
+                    self.process_tokens(more_tokens)
 
-            if more_tokens:
-                self.process_tokens(more_tokens)
+                if words:
+                    self.process_words(words)
 
-            if words:
-                self.process_words(words)
+        process_token_list(tokens)
 
     def process_words(self, words):
         """Process a set of 'words' in pyshyacc parlance, which includes

+ 356 - 65
bitbake/lib/bb/command.py

@@ -28,8 +28,15 @@ and must not trigger events, directly or indirectly.
 Commands are queued in a CommandQueue
 """
 
+from collections import OrderedDict, defaultdict
+
 import bb.event
 import bb.cooker
+import bb.remotedata
+
+class DataStoreConnectionHandle(object):
+    def __init__(self, dsindex=0):
+        self.dsindex = dsindex
 
 class CommandCompleted(bb.event.Event):
     pass
@@ -43,6 +50,8 @@ class CommandFailed(CommandExit):
     def __init__(self, message):
         self.error = message
         CommandExit.__init__(self, 1)
+    def __str__(self):
+        return "Command execution failed: %s" % self.error
 
 class CommandError(Exception):
     pass
@@ -55,6 +64,7 @@ class Command:
         self.cooker = cooker
         self.cmds_sync = CommandsSync()
         self.cmds_async = CommandsAsync()
+        self.remotedatastores = bb.remotedata.RemoteDatastores(cooker)
 
         # FIXME Add lock for this
         self.currentAsyncCommand = None
@@ -68,7 +78,8 @@ class Command:
                 if not hasattr(command_method, 'readonly') or False == getattr(command_method, 'readonly'):
                     return None, "Not able to execute not readonly commands in readonly mode"
             try:
-                if getattr(command_method, 'needconfig', False):
+                self.cooker.process_inotify_updates()
+                if getattr(command_method, 'needconfig', True):
                     self.cooker.updateCacheSync()
                 result = command_method(self, commandline)
             except CommandError as exc:
@@ -88,6 +99,7 @@ class Command:
 
     def runAsyncCommand(self):
         try:
+            self.cooker.process_inotify_updates()
             if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
                 # updateCache will trigger a shutdown of the parser
                 # and then raise BBHandledException triggering an exit
@@ -125,14 +137,23 @@ class Command:
 
     def finishAsyncCommand(self, msg=None, code=None):
         if msg or msg == "":
-            bb.event.fire(CommandFailed(msg), self.cooker.expanded_data)
+            bb.event.fire(CommandFailed(msg), self.cooker.data)
         elif code:
-            bb.event.fire(CommandExit(code), self.cooker.expanded_data)
+            bb.event.fire(CommandExit(code), self.cooker.data)
         else:
-            bb.event.fire(CommandCompleted(), self.cooker.expanded_data)
+            bb.event.fire(CommandCompleted(), self.cooker.data)
         self.currentAsyncCommand = None
         self.cooker.finishcommand()
 
+    def reset(self):
+        self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker)
+
+def split_mc_pn(pn):
+    if pn.startswith("multiconfig:"):
+        _, mc, pn = pn.split(":", 2)
+        return (mc, pn)
+    return ('', pn)
+
 class CommandsSync:
     """
     A class of synchronous commands
@@ -179,6 +200,7 @@ class CommandsSync:
         """
         varname = params[0]
         value = str(params[1])
+        command.cooker.extraconfigdata[varname] = value
         command.cooker.data.setVar(varname, value)
 
     def getSetVariable(self, command, params):
@@ -218,59 +240,15 @@ class CommandsSync:
         command.cooker.configuration.postfile = postfiles
     setPrePostConfFiles.needconfig = False
 
-    def getCpuCount(self, command, params):
-        """
-        Get the CPU count on the bitbake server
-        """
-        return bb.utils.cpu_count()
-    getCpuCount.readonly = True
-    getCpuCount.needconfig = False
-
     def matchFile(self, command, params):
         fMatch = params[0]
         return command.cooker.matchFile(fMatch)
     matchFile.needconfig = False
 
-    def generateNewImage(self, command, params):
-        image = params[0]
-        base_image = params[1]
-        package_queue = params[2]
-        timestamp = params[3]
-        description = params[4]
-        return command.cooker.generateNewImage(image, base_image,
-                        package_queue, timestamp, description)
-
-    def ensureDir(self, command, params):
-        directory = params[0]
-        bb.utils.mkdirhier(directory)
-    ensureDir.needconfig = False
-
-    def setVarFile(self, command, params):
-        """
-        Save a variable in a file; used for saving in a configuration file
-        """
-        var = params[0]
-        val = params[1]
-        default_file = params[2]
-        op = params[3]
-        command.cooker.modifyConfigurationVar(var, val, default_file, op)
-    setVarFile.needconfig = False
-
-    def removeVarFile(self, command, params):
-        """
-        Remove a variable declaration from a file
-        """
-        var = params[0]
-        command.cooker.removeConfigurationVar(var)
-    removeVarFile.needconfig = False
-
-    def createConfigFile(self, command, params):
-        """
-        Create an extra configuration file
-        """
-        name = params[0]
-        command.cooker.createConfigFile(name)
-    createConfigFile.needconfig = False
+    def getUIHandlerNum(self, command, params):
+        return bb.event.get_uihandler()
+    getUIHandlerNum.needconfig = False
+    getUIHandlerNum.readonly = True
 
     def setEventMask(self, command, params):
         handlerNum = params[0]
@@ -295,9 +273,307 @@ class CommandsSync:
     def updateConfig(self, command, params):
         options = params[0]
         environment = params[1]
-        command.cooker.updateConfigOpts(options, environment)
+        cmdline = params[2]
+        command.cooker.updateConfigOpts(options, environment, cmdline)
     updateConfig.needconfig = False
 
+    def parseConfiguration(self, command, params):
+        """Instruct bitbake to parse its configuration
+        NOTE: it is only necessary to call this if you aren't calling any normal action
+        (otherwise parsing is taken care of automatically)
+        """
+        command.cooker.parseConfiguration()
+    parseConfiguration.needconfig = False
+
+    def getLayerPriorities(self, command, params):
+        command.cooker.parseConfiguration()
+        ret = []
+        # regex objects cannot be marshalled by xmlrpc
+        for collection, pattern, regex, pri in command.cooker.bbfile_config_priorities:
+            ret.append((collection, pattern, regex.pattern, pri))
+        return ret
+    getLayerPriorities.readonly = True
+
+    def getRecipes(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return list(command.cooker.recipecaches[mc].pkg_pn.items())
+    getRecipes.readonly = True
+
+    def getRecipeDepends(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return list(command.cooker.recipecaches[mc].deps.items())
+    getRecipeDepends.readonly = True
+
+    def getRecipeVersions(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return command.cooker.recipecaches[mc].pkg_pepvpr
+    getRecipeVersions.readonly = True
+
+    def getRecipeProvides(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return command.cooker.recipecaches[mc].fn_provides
+    getRecipeProvides.readonly = True
+
+    def getRecipePackages(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return command.cooker.recipecaches[mc].packages
+    getRecipePackages.readonly = True
+
+    def getRecipePackagesDynamic(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return command.cooker.recipecaches[mc].packages_dynamic
+    getRecipePackagesDynamic.readonly = True
+
+    def getRProviders(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return command.cooker.recipecaches[mc].rproviders
+    getRProviders.readonly = True
+
+    def getRuntimeDepends(self, command, params):
+        ret = []
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        rundeps = command.cooker.recipecaches[mc].rundeps
+        for key, value in rundeps.items():
+            if isinstance(value, defaultdict):
+                value = dict(value)
+            ret.append((key, value))
+        return ret
+    getRuntimeDepends.readonly = True
+
+    def getRuntimeRecommends(self, command, params):
+        ret = []
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        runrecs = command.cooker.recipecaches[mc].runrecs
+        for key, value in runrecs.items():
+            if isinstance(value, defaultdict):
+                value = dict(value)
+            ret.append((key, value))
+        return ret
+    getRuntimeRecommends.readonly = True
+
+    def getRecipeInherits(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return command.cooker.recipecaches[mc].inherits
+    getRecipeInherits.readonly = True
+
+    def getBbFilePriority(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return command.cooker.recipecaches[mc].bbfile_priority
+    getBbFilePriority.readonly = True
+
+    def getDefaultPreference(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return command.cooker.recipecaches[mc].pkg_dp
+    getDefaultPreference.readonly = True
+
+    def getSkippedRecipes(self, command, params):
+        # Return list sorted by reverse priority order
+        import bb.cache
+        skipdict = OrderedDict(sorted(command.cooker.skiplist.items(),
+                                      key=lambda x: (-command.cooker.collection.calc_bbfile_priority(bb.cache.virtualfn2realfn(x[0])[0]), x[0])))
+        return list(skipdict.items())
+    getSkippedRecipes.readonly = True
+
+    def getOverlayedRecipes(self, command, params):
+        return list(command.cooker.collection.overlayed.items())
+    getOverlayedRecipes.readonly = True
+
+    def getFileAppends(self, command, params):
+        fn = params[0]
+        return command.cooker.collection.get_file_appends(fn)
+    getFileAppends.readonly = True
+
+    def getAllAppends(self, command, params):
+        return command.cooker.collection.bbappends
+    getAllAppends.readonly = True
+
+    def findProviders(self, command, params):
+        return command.cooker.findProviders()
+    findProviders.readonly = True
+
+    def findBestProvider(self, command, params):
+        (mc, pn) = split_mc_pn(params[0])
+        return command.cooker.findBestProvider(pn, mc)
+    findBestProvider.readonly = True
+
+    def allProviders(self, command, params):
+        try:
+            mc = params[0]
+        except IndexError:
+            mc = ''
+        return list(bb.providers.allProviders(command.cooker.recipecaches[mc]).items())
+    allProviders.readonly = True
+
+    def getRuntimeProviders(self, command, params):
+        rprovide = params[0]
+        try:
+            mc = params[1]
+        except IndexError:
+            mc = ''
+        all_p = bb.providers.getRuntimeProviders(command.cooker.recipecaches[mc], rprovide)
+        if all_p:
+            best = bb.providers.filterProvidersRunTime(all_p, rprovide,
+                            command.cooker.data,
+                            command.cooker.recipecaches[mc])[0][0]
+        else:
+            best = None
+        return all_p, best
+    getRuntimeProviders.readonly = True
+
+    def dataStoreConnectorFindVar(self, command, params):
+        dsindex = params[0]
+        name = params[1]
+        datastore = command.remotedatastores[dsindex]
+        value, overridedata = datastore._findVar(name)
+
+        if value:
+            content = value.get('_content', None)
+            if isinstance(content, bb.data_smart.DataSmart):
+                # Value is a datastore (e.g. BB_ORIGENV) - need to handle this carefully
+                idx = command.remotedatastores.check_store(content, True)
+                return {'_content': DataStoreConnectionHandle(idx),
+                        '_connector_origtype': 'DataStoreConnectionHandle',
+                        '_connector_overrides': overridedata}
+            elif isinstance(content, set):
+                return {'_content': list(content),
+                        '_connector_origtype': 'set',
+                        '_connector_overrides': overridedata}
+            else:
+                value['_connector_overrides'] = overridedata
+        else:
+            value = {}
+            value['_connector_overrides'] = overridedata
+        return value
+    dataStoreConnectorFindVar.readonly = True
+
+    def dataStoreConnectorGetKeys(self, command, params):
+        dsindex = params[0]
+        datastore = command.remotedatastores[dsindex]
+        return list(datastore.keys())
+    dataStoreConnectorGetKeys.readonly = True
+
+    def dataStoreConnectorGetVarHistory(self, command, params):
+        dsindex = params[0]
+        name = params[1]
+        datastore = command.remotedatastores[dsindex]
+        return datastore.varhistory.variable(name)
+    dataStoreConnectorGetVarHistory.readonly = True
+
+    def dataStoreConnectorExpandPythonRef(self, command, params):
+        config_data_dict = params[0]
+        varname = params[1]
+        expr = params[2]
+
+        config_data = command.remotedatastores.receive_datastore(config_data_dict)
+
+        varparse = bb.data_smart.VariableParse(varname, config_data)
+        return varparse.python_sub(expr)
+
+    def dataStoreConnectorRelease(self, command, params):
+        dsindex = params[0]
+        if dsindex <= 0:
+            raise CommandError('dataStoreConnectorRelease: invalid index %d' % dsindex)
+        command.remotedatastores.release(dsindex)
+
+    def dataStoreConnectorSetVarFlag(self, command, params):
+        dsindex = params[0]
+        name = params[1]
+        flag = params[2]
+        value = params[3]
+        datastore = command.remotedatastores[dsindex]
+        datastore.setVarFlag(name, flag, value)
+
+    def dataStoreConnectorDelVar(self, command, params):
+        dsindex = params[0]
+        name = params[1]
+        datastore = command.remotedatastores[dsindex]
+        if len(params) > 2:
+            flag = params[2]
+            datastore.delVarFlag(name, flag)
+        else:
+            datastore.delVar(name)
+
+    def dataStoreConnectorRenameVar(self, command, params):
+        dsindex = params[0]
+        name = params[1]
+        newname = params[2]
+        datastore = command.remotedatastores[dsindex]
+        datastore.renameVar(name, newname)
+
+    def parseRecipeFile(self, command, params):
+        """
+        Parse the specified recipe file (with or without bbappends)
+        and return a datastore object representing the environment
+        for the recipe.
+        """
+        fn = params[0]
+        appends = params[1]
+        appendlist = params[2]
+        if len(params) > 3:
+            config_data_dict = params[3]
+            config_data = command.remotedatastores.receive_datastore(config_data_dict)
+        else:
+            config_data = None
+
+        if appends:
+            if appendlist is not None:
+                appendfiles = appendlist
+            else:
+                appendfiles = command.cooker.collection.get_file_appends(fn)
+        else:
+            appendfiles = []
+        # We are calling bb.cache locally here rather than on the server,
+        # but that's OK because it doesn't actually need anything from
+        # the server barring the global datastore (which we have a remote
+        # version of)
+        if config_data:
+            # We have to use a different function here if we're passing in a datastore
+            # NOTE: we took a copy above, so we don't do it here again
+            envdata = bb.cache.parse_recipe(config_data, fn, appendfiles)['']
+        else:
+            # Use the standard path
+            parser = bb.cache.NoCache(command.cooker.databuilder)
+            envdata = parser.loadDataFull(fn, appendfiles)
+        idx = command.remotedatastores.store(envdata)
+        return DataStoreConnectionHandle(idx)
+    parseRecipeFile.readonly = True
+
 class CommandsAsync:
     """
     A class of asynchronous commands
@@ -311,8 +587,15 @@ class CommandsAsync:
         """
         bfile = params[0]
         task = params[1]
+        if len(params) > 2:
+            internal = params[2]
+        else:
+            internal = False
 
-        command.cooker.buildFile(bfile, task)
+        if internal:
+            command.cooker.buildFileInternal(bfile, task, fireevents=False, quietlog=True)
+        else:
+            command.cooker.buildFile(bfile, task)
     buildFile.needcache = False
 
     def buildTargets(self, command, params):
@@ -362,17 +645,6 @@ class CommandsAsync:
         command.finishAsyncCommand()
     generateTargetsTree.needcache = True
 
-    def findCoreBaseFiles(self, command, params):
-        """
-        Find certain files in COREBASE directory. i.e. Layers
-        """
-        subdir = params[0]
-        filename = params[1]
-
-        command.cooker.findCoreBaseFiles(subdir, filename)
-        command.finishAsyncCommand()
-    findCoreBaseFiles.needcache = False
-
     def findConfigFiles(self, command, params):
         """
         Find config files which provide appropriate values
@@ -472,3 +744,22 @@ class CommandsAsync:
         command.finishAsyncCommand()
     resetCooker.needcache = False
 
+    def clientComplete(self, command, params):
+        """
+        Do the right thing when the controlling client exits
+        """
+        command.cooker.clientComplete()
+        command.finishAsyncCommand()
+    clientComplete.needcache = False
+
+    def findSigInfo(self, command, params):
+        """
+        Find signature info files via the signature generator
+        """
+        pn = params[0]
+        taskname = params[1]
+        sigs = params[2]
+        res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.data)
+        bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.data)
+        command.finishAsyncCommand()
+    findSigInfo.needcache = False
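
Many of the new CommandsSync methods above are tagged with a readonly (and sometimes needconfig) function attribute, which runCommand checks before dispatch so that a read-only server can refuse mutating commands. A minimal sketch of that attribute-based dispatch pattern (the class and method names here are illustrative, not the real server):

    class Commands:
        def getAllAppends(self):
            return ["foo.bbappend"]
        getAllAppends.readonly = True      # safe to run on a read-only server

        def setVariable(self, name, value):
            print("set", name, "=", value)
        # no 'readonly' attribute: this command mutates server state

    def run_command(cmds, name, *args, readonly_server=False):
        method = getattr(cmds, name)
        # Bound methods proxy attribute lookups to the underlying function,
        # so the per-method tag set in the class body is visible here.
        if readonly_server and not getattr(method, "readonly", False):
            return None, "Not able to execute not readonly commands in readonly mode"
        return method(*args), None

    print(run_command(Commands(), "getAllAppends", readonly_server=True))
    print(run_command(Commands(), "setVariable", "PN", "zlib", readonly_server=True))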

File diff suppressed because it is too large
+ 269 - 302
bitbake/lib/bb/cooker.py


+ 77 - 23
bitbake/lib/bb/cookerdata.py

@@ -41,10 +41,6 @@ class ConfigParameters(object):
 
         self.options.pkgs_to_build = targets or []
 
-        self.options.tracking = False
-        if hasattr(self.options, "show_environment") and self.options.show_environment:
-            self.options.tracking = True
-
         for key, val in self.options.__dict__.items():
             setattr(self, key, val)
 
@@ -73,15 +69,15 @@ class ConfigParameters(object):
 
     def updateToServer(self, server, environment):
         options = {}
-        for o in ["abort", "tryaltconfigs", "force", "invalidate_stamp", 
-                  "verbose", "debug", "dry_run", "dump_signatures", 
+        for o in ["abort", "force", "invalidate_stamp",
+                  "verbose", "debug", "dry_run", "dump_signatures",
                   "debug_domains", "extra_assume_provided", "profile",
-                  "prefile", "postfile"]:
+                  "prefile", "postfile", "server_timeout"]:
             options[o] = getattr(self.options, o)
 
-        ret, error = server.runCommand(["updateConfig", options, environment])
+        ret, error = server.runCommand(["updateConfig", options, environment, sys.argv])
         if error:
-                raise Exception("Unable to update the server configuration with local parameters: %s" % error)
+            raise Exception("Unable to update the server configuration with local parameters: %s" % error)
 
     def parseActions(self):
         # Parse any commandline into actions
@@ -131,8 +127,6 @@ class CookerConfiguration(object):
         self.extra_assume_provided = []
         self.prefile = []
         self.postfile = []
-        self.prefile_server = []
-        self.postfile_server = []
         self.debug = 0
         self.cmd = None
         self.abort = True
@@ -144,8 +138,12 @@ class CookerConfiguration(object):
         self.dump_signatures = []
         self.dry_run = False
         self.tracking = False
-        self.interface = []
+        self.xmlrpcinterface = []
+        self.server_timeout = None
         self.writeeventlog = False
+        self.server_only = False
+        self.limited_deps = False
+        self.runall = None
 
         self.env = {}
 
@@ -154,7 +152,6 @@ class CookerConfiguration(object):
             if key in parameters.options.__dict__:
                 setattr(self, key, parameters.options.__dict__[key])
         self.env = parameters.environment.copy()
-        self.tracking = parameters.tracking
 
     def setServerRegIdleCallback(self, srcb):
         self.server_register_idlecallback = srcb
@@ -170,7 +167,7 @@ class CookerConfiguration(object):
 
     def __setstate__(self,state):
         for k in state:
-            setattr(self, k, state[k]) 
+            setattr(self, k, state[k])
 
 
 def catch_parse_error(func):
@@ -212,7 +209,7 @@ def _inherit(bbclass, data):
 
 def findConfigFile(configfile, data):
     search = []
-    bbpath = data.getVar("BBPATH", True)
+    bbpath = data.getVar("BBPATH")
     if bbpath:
         for i in bbpath.split(":"):
             search.append(os.path.join(i, "conf", configfile))
@@ -227,6 +224,27 @@ def findConfigFile(configfile, data):
 
     return None
 
+#
+# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working 
+# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH.
+#
+
+def findTopdir():
+    d = bb.data.init()
+    bbpath = None
+    if 'BBPATH' in os.environ:
+        bbpath = os.environ['BBPATH']
+        d.setVar('BBPATH', bbpath)
+
+    layerconf = findConfigFile("bblayers.conf", d)
+    if layerconf:
+        return os.path.dirname(os.path.dirname(layerconf))
+    if bbpath:
+        bitbakeconf = bb.utils.which(bbpath, "conf/bitbake.conf")
+        if bitbakeconf:
+            return os.path.dirname(os.path.dirname(bitbakeconf))
+    return None
+
 class CookerDataBuilder(object):
 
     def __init__(self, cookercfg, worker = False):
@@ -252,7 +270,7 @@ class CookerDataBuilder(object):
         filtered_keys = bb.utils.approved_variables()
         bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys)
         self.basedata.setVar("BB_ORIGENV", self.savedenv)
-        
+
         if worker:
             self.basedata.setVar("BB_WORKERCONTEXT", "1")
 
@@ -286,11 +304,13 @@ class CookerDataBuilder(object):
             self.data_hash = self.data.get_hash()
             self.mcdata[''] = self.data
 
-            multiconfig = (self.data.getVar("BBMULTICONFIG", True) or "").split()
+            multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
             for config in multiconfig:
-                mcdata = self.parseConfigurationFiles(['conf/multiconfig/%s.conf' % config] + self.prefiles, self.postfiles)
+                mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
                 bb.event.fire(bb.event.ConfigParsed(), mcdata)
                 self.mcdata[config] = mcdata
+            if multiconfig:
+                bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
 
         except (SyntaxError, bb.BBHandledException):
             raise bb.BBHandledException
@@ -301,11 +321,24 @@ class CookerDataBuilder(object):
             logger.exception("Error parsing configuration files")
             raise bb.BBHandledException
 
+        # Create a copy so we can reset at a later date when UIs disconnect
+        self.origdata = self.data
+        self.data = bb.data.createCopy(self.origdata)
+        self.mcdata[''] = self.data
+
+    def reset(self):
+        # We may not have run parseBaseConfiguration() yet
+        if not hasattr(self, 'origdata'):
+            return
+        self.data = bb.data.createCopy(self.origdata)
+        self.mcdata[''] = self.data
+
     def _findLayerConf(self, data):
         return findConfigFile("bblayers.conf", data)
 
-    def parseConfigurationFiles(self, prefiles, postfiles):
+    def parseConfigurationFiles(self, prefiles, postfiles, mc = "default"):
         data = bb.data.createCopy(self.basedata)
+        data.setVar("BB_CURRENT_MC", mc)
 
         # Parse files for loading *before* bitbake.conf and any includes
         for f in prefiles:
@@ -319,7 +352,7 @@ class CookerDataBuilder(object):
             data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
             data = parse_config_file(layerconf, data)
 
-            layers = (data.getVar('BBLAYERS', True) or "").split()
+            layers = (data.getVar('BBLAYERS') or "").split()
 
             data = bb.data.createCopy(data)
             approved = bb.utils.approved_variables()
@@ -342,7 +375,28 @@ class CookerDataBuilder(object):
             data.delVar('LAYERDIR_RE')
             data.delVar('LAYERDIR')
 
-        if not data.getVar("BBPATH", True):
+            bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split()
+            collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
+            invalid = []
+            for entry in bbfiles_dynamic:
+                parts = entry.split(":", 1)
+                if len(parts) != 2:
+                    invalid.append(entry)
+                    continue
+                l, f = parts
+                if l in collections:
+                    data.appendVar("BBFILES", " " + f)
+            if invalid:
+                bb.fatal("BBFILES_DYNAMIC entries must be of the form <collection name>:<filename pattern>, not:\n    %s" % "\n    ".join(invalid))
+
+            layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
+            for c in collections:
+                compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
+                if compat and not (compat & layerseries):
+                    bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)"
+                              % (c, " ".join(layerseries), " ".join(compat)))
+
+        if not data.getVar("BBPATH"):
             msg = "The BBPATH variable is not set"
             if not layerconf:
                 msg += (" and bitbake did not find a conf/bblayers.conf file in"
@@ -357,7 +411,7 @@ class CookerDataBuilder(object):
             data = parse_config_file(p, data)
 
         # Handle any INHERITs and inherit the base class
-        bbclasses  = ["base"] + (data.getVar('INHERIT', True) or "").split()
+        bbclasses  = ["base"] + (data.getVar('INHERIT') or "").split()
         for bbclass in bbclasses:
             data = _inherit(bbclass, data)
 
@@ -369,7 +423,7 @@ class CookerDataBuilder(object):
                 parselog.critical("Undefined event handler function '%s'" % var)
                 sys.exit(1)
             handlerln = int(data.getVarFlag(var, "lineno", False))
-            bb.event.register(var, data.getVar(var, False),  (data.getVarFlag(var, "eventmask", True) or "").split(), handlerfn, handlerln)
+            bb.event.register(var, data.getVar(var, False),  (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
 
         data.setVar('BBINCLUDED',bb.parse.get_file_depends(data))
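
The new BBFILES_DYNAMIC handling above only appends a pattern to BBFILES when its named collection is actually present, and rejects malformed entries outright. A small sketch of the same validation, operating on plain values instead of the datastore (the function name is mine):

    def expand_bbfiles_dynamic(bbfiles_dynamic, collections, bbfiles=""):
        invalid = []
        for entry in bbfiles_dynamic:
            parts = entry.split(":", 1)
            if len(parts) != 2:
                invalid.append(entry)
                continue
            collection, pattern = parts
            if collection in collections:
                # Only layers that are actually configured contribute files.
                bbfiles += " " + pattern
        if invalid:
            raise SystemExit("BBFILES_DYNAMIC entries must be of the form "
                             "<collection name>:<filename pattern>, not:\n    %s"
                             % "\n    ".join(invalid))
        return bbfiles.strip()

    print(expand_bbfiles_dynamic(
        ["meta-foo:${LAYERDIR}/bbappends/*.bbappend"], {"meta-foo", "core"}))
    # -> ${LAYERDIR}/bbappends/*.bbappend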
 

+ 32 - 143
bitbake/lib/bb/daemonize.py

@@ -1,48 +1,14 @@
 """
 Python Daemonizing helper
 
-Configurable daemon behaviors:
-
-    1.) The current working directory set to the "/" directory.
-    2.) The current file creation mode mask set to 0.
-    3.) Close all open files (1024). 
-    4.) Redirect standard I/O streams to "/dev/null".
-
-A failed call to fork() now raises an exception.
-
-References:
-    1) Advanced Programming in the Unix Environment: W. Richard Stevens
-	http://www.apuebook.com/apue3e.html
-    2) The Linux Programming Interface: Michael Kerrisk
-	http://man7.org/tlpi/index.html
-    3) Unix Programming Frequently Asked Questions:
-	http://www.faqs.org/faqs/unix-faq/programmer/faq/
-
-Modified to allow a function to be daemonized and return for 
-bitbake use by Richard Purdie
+Originally based on code Copyright (C) 2005 Chad J. Schroeder but now heavily modified
+to allow a function to be daemonized and return for bitbake use by Richard Purdie
 """
 
-__author__ = "Chad J. Schroeder"
-__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
-__version__ = "0.2"
-
-# Standard Python modules.
-import os                    # Miscellaneous OS interfaces.
-import sys                   # System-specific parameters and functions.
-
-# Default daemon parameters.
-# File mode creation mask of the daemon.
-# For BitBake's children, we do want to inherit the parent umask.
-UMASK = None
-
-# Default maximum for the number of available file descriptors.
-MAXFD = 1024
-
-# The standard I/O file descriptors are redirected to /dev/null by default.
-if (hasattr(os, "devnull")):
-    REDIRECT_TO = os.devnull
-else:
-    REDIRECT_TO = "/dev/null"
+import os
+import sys
+import io
+import traceback
 
 def createDaemon(function, logfile):
     """
@@ -65,36 +31,6 @@ def createDaemon(function, logfile):
         # leader of the new process group, we call os.setsid().  The process is
         # also guaranteed not to have a controlling terminal.
         os.setsid()
-
-        # Is ignoring SIGHUP necessary?
-        #
-        # It's often suggested that the SIGHUP signal should be ignored before
-        # the second fork to avoid premature termination of the process.  The
-        # reason is that when the first child terminates, all processes, e.g.
-        # the second child, in the orphaned group will be sent a SIGHUP.
-        #
-        # "However, as part of the session management system, there are exactly
-        # two cases where SIGHUP is sent on the death of a process:
-        #
-        #    1) When the process that dies is the session leader of a session that
-        #        is attached to a terminal device, SIGHUP is sent to all processes
-        #        in the foreground process group of that terminal device.
-        #    2) When the death of a process causes a process group to become
-        #        orphaned, and one or more processes in the orphaned group are
-        #        stopped, then SIGHUP and SIGCONT are sent to all members of the
-        #        orphaned group." [2]
-        #
-        # The first case can be ignored since the child is guaranteed not to have
-        # a controlling terminal.  The second case isn't so easy to dismiss.
-        # The process group is orphaned when the first child terminates and
-        # POSIX.1 requires that every STOPPED process in an orphaned process
-        # group be sent a SIGHUP signal followed by a SIGCONT signal.  Since the
-        # second child is not STOPPED though, we can safely forego ignoring the
-        # SIGHUP signal.  In any case, there are no ill-effects if it is ignored.
-        #
-        # import signal              # Set handlers for asynchronous events.
-        # signal.signal(signal.SIGHUP, signal.SIG_IGN)
-
         try:
             # Fork a second child and exit immediately to prevent zombies.  This
             # causes the second child process to be orphaned, making the init
@@ -108,86 +44,39 @@ def createDaemon(function, logfile):
         except OSError as e:
             raise Exception("%s [%d]" % (e.strerror, e.errno))
 
-        if (pid == 0):  # The second child.
-            # We probably don't want the file mode creation mask inherited from
-            # the parent, so we give the child complete control over permissions.
-            if UMASK is not None:
-                os.umask(UMASK)
-        else:
+        if (pid != 0):
             # Parent (the first child) of the second child.
+            # exit() or _exit()?
+            # _exit is like exit(), but it doesn't call any functions registered
+            # with atexit (and on_exit) or any registered signal handlers.  It also
+            # closes any open file descriptors.  Using exit() may cause all stdio
+            # streams to be flushed twice and any temporary files may be unexpectedly
+            # removed.  It's therefore recommended that child branches of a fork()
+            # and the parent branch(es) of a daemon use _exit().
             os._exit(0)
     else:
-        # exit() or _exit()?
-        # _exit is like exit(), but it doesn't call any functions registered
-        # with atexit (and on_exit) or any registered signal handlers.  It also
-        # closes any open file descriptors.  Using exit() may cause all stdio
-        # streams to be flushed twice and any temporary files may be unexpectedly
-        # removed.  It's therefore recommended that child branches of a fork()
-        # and the parent branch(es) of a daemon use _exit().
+        os.waitpid(pid, 0)
         return
 
-    # Close all open file descriptors.  This prevents the child from keeping
-    # open any file descriptors inherited from the parent.  There is a variety
-    # of methods to accomplish this task.  Three are listed below.
-    #
-    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
-    # number of open file descriptors to close.  If it doesn't exist, use
-    # the default value (configurable).
-    #
-    # try:
-    #     maxfd = os.sysconf("SC_OPEN_MAX")
-    # except (AttributeError, ValueError):
-    #     maxfd = MAXFD
-    #
-    # OR
-    #
-    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
-    #     maxfd = os.sysconf("SC_OPEN_MAX")
-    # else:
-    #     maxfd = MAXFD
-    #
-    # OR
-    #
-    # Use the getrlimit method to retrieve the maximum file descriptor number
-    # that can be opened by this process.  If there is no limit on the
-    # resource, use the default value.
-    #
-    import resource             # Resource usage information.
-    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
-    if (maxfd == resource.RLIM_INFINITY):
-        maxfd = MAXFD
-  
-    # Iterate through and close all file descriptors.
-#    for fd in range(0, maxfd):
-#        try:
-#            os.close(fd)
-#        except OSError:        # ERROR, fd wasn't open to begin with (ignored)
-#            pass
-
-    # Redirect the standard I/O file descriptors to the specified file.  Since
-    # the daemon has no controlling terminal, most daemons redirect stdin,
-    # stdout, and stderr to /dev/null.  This is done to prevent side-effects
-    # from reads and writes to the standard I/O file descriptors.
-
-    # This call to open is guaranteed to return the lowest file descriptor,
-    # which will be 0 (stdin), since it was closed above.
-#    os.open(REDIRECT_TO, os.O_RDWR)    # standard input (0)
-
-    # Duplicate standard input to standard output and standard error.
-#    os.dup2(0, 1)                      # standard output (1)
-#    os.dup2(0, 2)                      # standard error (2)
-
+    # The second child.
 
+    # Replace standard fds with our own
     si = open('/dev/null', 'r')
-    so = open(logfile, 'w')
-    se = so
-
-
-    # Replace those fds with our own
     os.dup2(si.fileno(), sys.stdin.fileno())
-    os.dup2(so.fileno(), sys.stdout.fileno())
-    os.dup2(se.fileno(), sys.stderr.fileno())
 
-    function()
+    try:
+        so = open(logfile, 'a+')
+        se = so
+        os.dup2(so.fileno(), sys.stdout.fileno())
+        os.dup2(se.fileno(), sys.stderr.fileno())
+    except io.UnsupportedOperation:
+        sys.stdout = open(logfile, 'a+')
+        sys.stderr = sys.stdout
 
-    os._exit(0)
+    try:
+        function()
+    except Exception as e:
+        traceback.print_exc()
+    finally:
+        bb.event.print_ui_queue()
+        os._exit(0)

+ 28 - 75
bitbake/lib/bb/data.py

@@ -78,59 +78,6 @@ def initVar(var, d):
     """Non-destructive var init for data structure"""
     d.initVar(var)
 
-
-def setVar(var, value, d):
-    """Set a variable to a given value"""
-    d.setVar(var, value)
-
-
-def getVar(var, d, exp = False):
-    """Gets the value of a variable"""
-    return d.getVar(var, exp)
-
-
-def renameVar(key, newkey, d):
-    """Renames a variable from key to newkey"""
-    d.renameVar(key, newkey)
-
-def delVar(var, d):
-    """Removes a variable from the data set"""
-    d.delVar(var)
-
-def appendVar(var, value, d):
-    """Append additional value to a variable"""
-    d.appendVar(var, value)
-
-def setVarFlag(var, flag, flagvalue, d):
-    """Set a flag for a given variable to a given value"""
-    d.setVarFlag(var, flag, flagvalue)
-
-def getVarFlag(var, flag, d):
-    """Gets given flag from given var"""
-    return d.getVarFlag(var, flag, False)
-
-def delVarFlag(var, flag, d):
-    """Removes a given flag from the variable's flags"""
-    d.delVarFlag(var, flag)
-
-def setVarFlags(var, flags, d):
-    """Set the flags for a given variable
-
-    Note:
-        setVarFlags will not clear previous
-        flags. Think of this method as
-        addVarFlags
-    """
-    d.setVarFlags(var, flags)
-
-def getVarFlags(var, d):
-    """Gets a variable's flags"""
-    return d.getVarFlags(var)
-
-def delVarFlags(var, d):
-    """Removes a variable's flags"""
-    d.delVarFlags(var)
-
 def keys(d):
     """Return a list of keys in d"""
     return d.keys()
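With the module-level wrappers gone, callers use the datastore methods directly; combined with the expand-by-default getVar() introduced later in this commit, the migration looks roughly like this (variable name invented for illustration):

# before, via the removed bb.data wrappers:
#   bb.data.setVar("FOO", "bar", d)
#   value = bb.data.getVar("FOO", d, True)
# after, directly on the datastore:
d.setVar("FOO", "bar")
value = d.getVar("FOO")          # expansion now defaults to True
d.setVarFlag("FOO", "export", True)
flags = d.getVarFlags("FOO")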
@@ -174,7 +121,7 @@ def inheritFromOS(d, savedenv, permitted):
     for s in savedenv.keys():
         if s in permitted:
             try:
-                d.setVar(s, savedenv.getVar(s, True), op = 'from env')
+                d.setVar(s, savedenv.getVar(s), op = 'from env')
                 if s in exportlist:
                     d.setVarFlag(s, "export", True, op = 'auto env export')
             except TypeError:
@@ -194,7 +141,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
     try:
         if all:
             oval = d.getVar(var, False)
-        val = d.getVar(var, True)
+        val = d.getVar(var)
     except (KeyboardInterrupt, bb.build.FuncFailed):
         raise
     except Exception as exc:
@@ -249,7 +196,7 @@ def emit_env(o=sys.__stdout__, d = init(), all=False):
     keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc)
     grouped = groupby(keys, isfunc)
     for isfunc, keys in grouped:
-        for key in keys:
+        for key in sorted(keys):
             emit_var(key, o, d, all and not isfunc) and o.write('\n')
 
 def exported_keys(d):
@@ -258,11 +205,13 @@ def exported_keys(d):
                                       not d.getVarFlag(key, 'unexport', False))
 
 def exported_vars(d):
-    for key in exported_keys(d):
+    k = list(exported_keys(d))
+    for key in k:
         try:
-            value = d.getVar(key, True)
-        except Exception:
-            pass
+            value = d.getVar(key)
+        except Exception as err:
+            bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE"), key, err))
+            continue
 
         if value is not None:
             yield key, str(value)
@@ -271,13 +220,13 @@ def emit_func(func, o=sys.__stdout__, d = init()):
     """Emits all items in the data store in a format such that it can be sourced by a shell."""
 
     keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func", False))
-    for key in keys:
+    for key in sorted(keys):
         emit_var(key, o, d, False)
 
     o.write('\n')
     emit_var(func, o, d, False) and o.write('\n')
-    newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
-    newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
+    newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
+    newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
     seen = set()
     while newdeps:
         deps = newdeps
@@ -286,8 +235,8 @@ def emit_func(func, o=sys.__stdout__, d = init()):
         for dep in deps:
             if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False):
                emit_var(dep, o, d, False) and o.write('\n')
-               newdeps |=  bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep, True))
-               newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
+               newdeps |=  bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep))
+               newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
         newdeps -= seen
 
 _functionfmt = """
@@ -310,7 +259,7 @@ def emit_func_python(func, o=sys.__stdout__, d = init()):
     pp = bb.codeparser.PythonParser(func, logger)
     pp.parse_python(d.getVar(func, False))
     newdeps = pp.execs
-    newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
+    newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
     seen = set()
     while newdeps:
         deps = newdeps
@@ -322,7 +271,7 @@ def emit_func_python(func, o=sys.__stdout__, d = init()):
                pp = bb.codeparser.PythonParser(dep, logger)
                pp.parse_python(d.getVar(dep, False))
                newdeps |= pp.execs
-               newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
+               newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
         newdeps -= seen
 
 def update_data(d):
@@ -341,17 +290,19 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
             return deps, value
         varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
         vardeps = varflags.get("vardeps")
-        value = d.getVar(key, False)
+        value = d.getVarFlag(key, "_content", False)
 
         def handle_contains(value, contains, d):
             newvalue = ""
             for k in sorted(contains):
-                l = (d.getVar(k, True) or "").split()
-                for word in sorted(contains[k]):
-                    if word in l:
-                        newvalue += "\n%s{%s} = Set" %  (k, word)
+                l = (d.getVar(k) or "").split()
+                for item in sorted(contains[k]):
+                    for word in item.split():
+                        if not word in l:
+                            newvalue += "\n%s{%s} = Unset" % (k, item)
+                            break
                     else:
-                        newvalue += "\n%s{%s} = Unset" %  (k, word)
+                        newvalue += "\n%s{%s} = Set" % (k, item)
             if not newvalue:
                 return value
             if not value:
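The reworked handle_contains() treats each tracked item as a possibly multi-word requirement: an item only counts as Set when every one of its words is present in the variable's value. A standalone sketch of the same for/else logic (function name and data are invented):

def classify(var_value, items):
    present = var_value.split()
    out = []
    for item in sorted(items):
        # every word of a multi-word item must be present for it to be Set
        if all(word in present for word in item.split()):
            out.append("%s = Set" % item)
        else:
            out.append("%s = Unset" % item)
    return out

print(classify("x11 opengl wayland", {"opengl", "x11 wayland", "directfb"}))
# ['directfb = Unset', 'opengl = Set', 'x11 wayland = Set']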
@@ -364,7 +315,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
             if varflags.get("python"):
                 parser = bb.codeparser.PythonParser(key, logger)
                 if value and "\t" in value:
-                    logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True)))
+                    logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE")))
                 parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
                 deps = deps | parser.references
                 deps = deps | (keys & parser.execs)
@@ -408,6 +359,8 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
 
         deps |= set((vardeps or "").split())
         deps -= set(varflags.get("vardepsexclude", "").split())
+    except bb.parse.SkipRecipe:
+        raise
     except Exception as e:
         bb.warn("Exception during build_dependencies for %s" % key)
         raise
@@ -419,7 +372,7 @@ def generate_dependencies(d):
 
     keys = set(key for key in d if not key.startswith("__"))
     shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
-    varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)
+    varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS')
 
     deps = {}
     values = {}

+ 103 - 34
bitbake/lib/bb/data_smart.py

@@ -39,7 +39,7 @@ from bb.COW  import COWDictBase
 logger = logging.getLogger("BitBake.Data")
 
 __setvar_keyword__ = ["_append", "_prepend", "_remove"]
-__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>.*))?$')
+__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
 __expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
 __expand_python_regexp__ = re.compile(r"\${@.+?}")
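The tightened <add> group ([^A-Z]* instead of .*) means a suffix containing upper-case letters is no longer mistaken for an override on an append/prepend/remove. A quick check with invented variable names:

import re
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')

m = __setvar_regexp__.match("BAR_append_arm")
print(m.group("base"), m.group("keyword"), m.group("add"))   # BAR _append arm

# an upper-case suffix no longer matches, so this is treated as a plain variable:
print(__setvar_regexp__.match("FOO_append_BAZ"))             # None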
 
@@ -108,7 +108,7 @@ class VariableParse:
                 varparse = self.d.expand_cache[key]
                 var = varparse.value
             else:
-                var = self.d.getVarFlag(key, "_content", True)
+                var = self.d.getVarFlag(key, "_content")
             self.references.add(key)
             if var is not None:
                 return var
@@ -116,13 +116,21 @@ class VariableParse:
                 return match.group()
 
     def python_sub(self, match):
-            code = match.group()[3:-1]
+            if isinstance(match, str):
+                code = match
+            else:
+                code = match.group()[3:-1]
+
+            if "_remote_data" in self.d:
+                connector = self.d["_remote_data"]
+                return connector.expandPythonRef(self.varname, code, self.d)
+
             codeobj = compile(code.strip(), self.varname or "<expansion>", "eval")
 
             parser = bb.codeparser.PythonParser(self.varname, logger)
             parser.parse_python(code)
             if self.varname:
-                vardeps = self.d.getVarFlag(self.varname, "vardeps", True)
+                vardeps = self.d.getVarFlag(self.varname, "vardeps")
                 if vardeps is None:
                     parser.log.flush()
             else:
@@ -146,7 +154,7 @@ class DataContext(dict):
         self['d'] = metadata
 
     def __missing__(self, key):
-        value = self.metadata.getVar(key, True)
+        value = self.metadata.getVar(key)
         if value is None or self.metadata.getVarFlag(key, 'func', False):
             raise KeyError(key)
         else:
@@ -222,6 +230,19 @@ class VariableHistory(object):
         new.variables = self.variables.copy()
         return new
 
+    def __getstate__(self):
+        vardict = {}
+        for k, v in self.variables.iteritems():
+            vardict[k] = v
+        return {'dataroot': self.dataroot,
+                'variables': vardict}
+
+    def __setstate__(self, state):
+        self.dataroot = state['dataroot']
+        self.variables = COWDictBase.copy()
+        for k, v in state['variables'].items():
+            self.variables[k] = v
+
     def record(self, *kwonly, **loginfo):
         if not self.dataroot._tracking:
             return
@@ -247,10 +268,15 @@ class VariableHistory(object):
         self.variables[var].append(loginfo.copy())
 
     def variable(self, var):
-        if var in self.variables:
-            return self.variables[var]
+        remote_connector = self.dataroot.getVar('_remote_data', False)
+        if remote_connector:
+            varhistory = remote_connector.getVarHistory(var)
         else:
-            return []
+            varhistory = []
+
+        if var in self.variables:
+            varhistory.extend(self.variables[var])
+        return varhistory
 
     def emit(self, var, oval, val, o, d):
         history = self.variable(var)
@@ -318,7 +344,7 @@ class VariableHistory(object):
         the files in which they were added.
         """
         history = self.variable(var)
-        finalitems = (d.getVar(var, True) or '').split()
+        finalitems = (d.getVar(var) or '').split()
         filemap = {}
         isset = False
         for event in history:
@@ -426,11 +452,11 @@ class DataSmart(MutableMapping):
             # Can end up here recursively so setup dummy values
             self.overrides = []
             self.overridesset = set()
-            self.overrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
+            self.overrides = (self.getVar("OVERRIDES") or "").split(":") or []
             self.overridesset = set(self.overrides)
             self.inoverride = False
             self.expand_cache = {}
-            newoverrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
+            newoverrides = (self.getVar("OVERRIDES") or "").split(":") or []
             if newoverrides == self.overrides:
                 break
             self.overrides = newoverrides
@@ -447,17 +473,22 @@ class DataSmart(MutableMapping):
         dest = self.dict
         while dest:
             if var in dest:
-                return dest[var]
+                return dest[var], self.overridedata.get(var, None)
+
+            if "_remote_data" in dest:
+                connector = dest["_remote_data"]["_content"]
+                return connector.getVar(var)
 
             if "_data" not in dest:
                 break
             dest = dest["_data"]
+        return None, self.overridedata.get(var, None)
 
     def _makeShadowCopy(self, var):
         if var in self.dict:
             return
 
-        local_var = self._findVar(var)
+        local_var, _ = self._findVar(var)
 
         if local_var:
             self.dict[var] = copy.copy(local_var)
@@ -471,6 +502,12 @@ class DataSmart(MutableMapping):
         if 'parsing' in loginfo:
             parsing=True
 
+        if '_remote_data' in self.dict:
+            connector = self.dict["_remote_data"]["_content"]
+            res = connector.setVar(var, value)
+            if not res:
+                return
+
         if 'op' not in loginfo:
             loginfo['op'] = "set"
         self.expand_cache = {}
@@ -509,6 +546,8 @@ class DataSmart(MutableMapping):
                 del self.dict[var]["_append"]
             if "_prepend" in self.dict[var]:
                 del self.dict[var]["_prepend"]
+            if "_remove" in self.dict[var]:
+                del self.dict[var]["_remove"]
             if var in self.overridedata:
                 active = []
                 self.need_overrides()
@@ -541,7 +580,7 @@ class DataSmart(MutableMapping):
             nextnew = set()
             self.overridevars.update(new)
             for i in new:
-                vardata = self.expandWithRefs(self.getVar(i, True), i)
+                vardata = self.expandWithRefs(self.getVar(i), i)
                 nextnew.update(vardata.references)
                 nextnew.update(vardata.contains.keys())
             new = nextnew
@@ -565,13 +604,19 @@ class DataSmart(MutableMapping):
                 if len(shortvar) == 0:
                     override = None
 
-    def getVar(self, var, expand, noweakdefault=False, parsing=False):
+    def getVar(self, var, expand=True, noweakdefault=False, parsing=False):
         return self.getVarFlag(var, "_content", expand, noweakdefault, parsing)
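Since expand now defaults to True, the bulk of this commit's d.getVar(X, True) → d.getVar(X) conversions are behaviour-preserving. A small illustration, assuming a fresh datastore d (e.g. from bb.data.init()); the variable names are invented:

d.setVar("BASE", "/srv")
d.setVar("DLPATH", "${BASE}/downloads")
assert d.getVar("DLPATH") == d.getVar("DLPATH", True) == "/srv/downloads"
# unexpanded access still needs an explicit False:
assert d.getVar("DLPATH", False) == "${BASE}/downloads"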
 
     def renameVar(self, key, newkey, **loginfo):
         """
         Rename the variable key to newkey
         """
+        if '_remote_data' in self.dict:
+            connector = self.dict["_remote_data"]["_content"]
+            res = connector.renameVar(key, newkey)
+            if not res:
+                return
+
         val = self.getVar(key, 0, parsing=True)
         if val is not None:
             loginfo['variable'] = newkey
@@ -615,6 +660,12 @@ class DataSmart(MutableMapping):
         self.setVar(var + "_prepend", value, ignore=True, parsing=True)
 
     def delVar(self, var, **loginfo):
+        if '_remote_data' in self.dict:
+            connector = self.dict["_remote_data"]["_content"]
+            res = connector.delVar(var)
+            if not res:
+                return
+
         loginfo['detail'] = ""
         loginfo['op'] = 'del'
         self.varhistory.record(**loginfo)
@@ -641,6 +692,12 @@ class DataSmart(MutableMapping):
                          override = None
 
     def setVarFlag(self, var, flag, value, **loginfo):
+        if '_remote_data' in self.dict:
+            connector = self.dict["_remote_data"]["_content"]
+            res = connector.setVarFlag(var, flag, value)
+            if not res:
+                return
+
         self.expand_cache = {}
         if 'op' not in loginfo:
             loginfo['op'] = "set"
@@ -662,14 +719,14 @@ class DataSmart(MutableMapping):
                 self.dict["__exportlist"]["_content"] = set()
             self.dict["__exportlist"]["_content"].add(var)
 
-    def getVarFlag(self, var, flag, expand, noweakdefault=False, parsing=False):
-        local_var = self._findVar(var)
+    def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False):
+        local_var, overridedata = self._findVar(var)
         value = None
-        if flag == "_content" and var in self.overridedata and not parsing:
+        if flag == "_content" and overridedata is not None and not parsing:
             match = False
             active = {}
             self.need_overrides()
-            for (r, o) in self.overridedata[var]:
+            for (r, o) in overridedata:
                 # What about double overrides both with "_" in the name?
                 if o in self.overridesset:
                     active[o] = r
@@ -748,18 +805,25 @@ class DataSmart(MutableMapping):
                 if match:
                     removes.extend(self.expand(r).split())
 
-            filtered = filter(lambda v: v not in removes,
-                              value.split())
-            value = " ".join(filtered)
-            if expand and var in self.expand_cache:
-                 # We need to ensure the expand cache has the correct value
-                 # flag == "_content" here
-                self.expand_cache[var].value = value
+            if removes:
+                filtered = filter(lambda v: v not in removes,
+                                  value.split())
+                value = " ".join(filtered)
+                if expand and var in self.expand_cache:
+                    # We need to ensure the expand cache has the correct value
+                    # flag == "_content" here
+                    self.expand_cache[var].value = value
         return value
 
     def delVarFlag(self, var, flag, **loginfo):
+        if '_remote_data' in self.dict:
+            connector = self.dict["_remote_data"]["_content"]
+            res = connector.delVarFlag(var, flag)
+            if not res:
+                return
+
         self.expand_cache = {}
-        local_var = self._findVar(var)
+        local_var, _ = self._findVar(var)
         if not local_var:
             return
         if not var in self.dict:
@@ -802,7 +866,7 @@ class DataSmart(MutableMapping):
             self.dict[var][i] = flags[i]
 
     def getVarFlags(self, var, expand = False, internalflags=False):
-        local_var = self._findVar(var)
+        local_var, _ = self._findVar(var)
         flags = {}
 
         if local_var:
@@ -844,7 +908,7 @@ class DataSmart(MutableMapping):
         data = DataSmart()
         data.dict["_data"] = self.dict
         data.varhistory = self.varhistory.copy()
-        data.varhistory.datasmart = data
+        data.varhistory.dataroot = data
         data.inchistory = self.inchistory.copy()
 
         data._tracking = self._tracking
@@ -875,7 +939,7 @@ class DataSmart(MutableMapping):
 
     def localkeys(self):
         for key in self.dict:
-            if key != '_data':
+            if key not in ['_data', '_remote_data']:
                 yield key
 
     def __iter__(self):
@@ -884,7 +948,7 @@ class DataSmart(MutableMapping):
         def keylist(d):        
             klist = set()
             for key in d:
-                if key == "_data":
+                if key in ["_data", "_remote_data"]:
                     continue
                 if key in deleted:
                     continue
@@ -898,6 +962,13 @@ class DataSmart(MutableMapping):
             if "_data" in d:
                 klist |= keylist(d["_data"])
 
+            if "_remote_data" in d:
+                connector = d["_remote_data"]["_content"]
+                for key in connector.getKeys():
+                    if key in deleted:
+                        continue
+                    klist.add(key)
+
             return klist
 
         self.need_overrides()
@@ -935,9 +1006,8 @@ class DataSmart(MutableMapping):
         data = {}
         d = self.createCopy()
         bb.data.expandKeys(d)
-        bb.data.update_data(d)
 
-        config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST", True) or "").split())
+        config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST") or "").split())
         keys = set(key for key in iter(d) if not key.startswith("__"))
         for key in keys:
             if key in config_whitelist:
@@ -956,7 +1026,6 @@ class DataSmart(MutableMapping):
 
         for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]:
             bb_list = d.getVar(key, False) or []
-            bb_list.sort()
             data.update({key:str(bb_list)})
 
             if key == "__BBANONFUNCS":

+ 131 - 48
bitbake/lib/bb/event.py

@@ -48,6 +48,16 @@ class Event(object):
     def __init__(self):
         self.pid = worker_pid
 
+
+class HeartbeatEvent(Event):
+    """Triggered at regular time intervals of 10 seconds. Other events can fire much more often
+       (runQueueTaskStarted when there are many short tasks) or not at all for long periods
+       of time (again runQueueTaskStarted, when there is just one long-running task), so this
+       event is more suitable for doing some task-independent work occasionally."""
+    def __init__(self, time):
+        Event.__init__(self)
+        self.time = time
+
 Registered        = 10
 AlreadyRegistered = 14
 
@@ -139,23 +149,34 @@ def print_ui_queue():
 
         # First check to see if we have any proper messages
         msgprint = False
-        for event in ui_queue:
+        msgerrs = False
+
+        # Should we print to stderr?
+        for event in ui_queue[:]:
+            if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING:
+                msgerrs = True
+                break
+
+        if msgerrs:
+            logger.addHandler(stderr)
+        else:
+            logger.addHandler(stdout)
+
+        for event in ui_queue[:]:
             if isinstance(event, logging.LogRecord):
                 if event.levelno > logging.DEBUG:
-                    if event.levelno >= logging.WARNING:
-                        logger.addHandler(stderr)
-                    else:
-                        logger.addHandler(stdout)
                     logger.handle(event)
                     msgprint = True
-        if msgprint:
-            return
 
         # Nope, so just print all of the messages we have (including debug messages)
-        logger.addHandler(stdout)
-        for event in ui_queue:
-            if isinstance(event, logging.LogRecord):
-                logger.handle(event)
+        if not msgprint:
+            for event in ui_queue[:]:
+                if isinstance(event, logging.LogRecord):
+                    logger.handle(event)
+        if msgerrs:
+            logger.removeHandler(stderr)
+        else:
+            logger.removeHandler(stdout)
 
 def fire_ui_handlers(event, d):
     global _thread_lock
@@ -202,6 +223,12 @@ def fire(event, d):
     if worker_fire:
         worker_fire(event, d)
     else:
+        # If messages have been queued up, clear the queue
+        global _uiready, ui_queue
+        if _uiready and ui_queue:
+            for queue_event in ui_queue:
+                fire_ui_handlers(queue_event, d)
+            ui_queue = []
         fire_ui_handlers(event, d)
 
 def fire_from_worker(event, d):
@@ -254,6 +281,11 @@ def register(name, handler, mask=None, filename=None, lineno=None):
 def remove(name, handler):
     """Remove an Event handler"""
     _handlers.pop(name)
+    if name in _catchall_handlers:
+        _catchall_handlers.pop(name)
+    for event in _event_handler_map.keys():
+        if name in _event_handler_map[event]:
+            _event_handler_map[event].pop(name)
 
 def get_handlers():
     return _handlers
@@ -267,20 +299,28 @@ def set_eventfilter(func):
     _eventfilter = func
 
 def register_UIHhandler(handler, mainui=False):
-    if mainui:
-        global _uiready
-        _uiready = True
     bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
     _ui_handlers[_ui_handler_seq] = handler
     level, debug_domains = bb.msg.constructLogOptions()
     _ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
+    if mainui:
+        global _uiready
+        _uiready = _ui_handler_seq
     return _ui_handler_seq
 
-def unregister_UIHhandler(handlerNum):
+def unregister_UIHhandler(handlerNum, mainui=False):
+    if mainui:
+        global _uiready
+        _uiready = False
     if handlerNum in _ui_handlers:
         del _ui_handlers[handlerNum]
     return
 
+def get_uihandler():
+    if _uiready is False:
+        return None
+    return _uiready
+
 # Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC
 class UIEventFilter(object):
     def __init__(self, level, debug_domains):
@@ -343,6 +383,12 @@ class OperationProgress(Event):
 class ConfigParsed(Event):
     """Configuration Parsing Complete"""
 
+class MultiConfigParsed(Event):
+    """Multi-Config Parsing Complete"""
+    def __init__(self, mcdata):
+        self.mcdata = mcdata
+        Event.__init__(self)
+
 class RecipeEvent(Event):
     def __init__(self, fn):
         self.fn = fn
@@ -351,6 +397,17 @@ class RecipeEvent(Event):
 class RecipePreFinalise(RecipeEvent):
     """ Recipe Parsing Complete but not yet finialised"""
 
+class RecipeTaskPreProcess(RecipeEvent):
+    """
+    Recipe Tasks about to be finalised
+    The list of tasks should be final at this point and handlers
+    are only able to change interdependencies
+    """
+    def __init__(self, fn, tasklist):
+        self.fn = fn
+        self.tasklist = tasklist
+        Event.__init__(self)
+
 class RecipeParsed(RecipeEvent):
     """ Recipe Parsing Complete """
 
@@ -372,7 +429,7 @@ class StampUpdate(Event):
     targets = property(getTargets)
 
 class BuildBase(Event):
-    """Base class for bbmake run events"""
+    """Base class for bitbake build events"""
 
     def __init__(self, n, p, failures = 0):
         self._name = n
@@ -392,12 +449,6 @@ class BuildBase(Event):
     def setName(self, name):
         self._name = name
 
-    def getCfg(self):
-        return self.data
-
-    def setCfg(self, cfg):
-        self.data = cfg
-
     def getFailures(self):
         """
         Return the number of failed packages
@@ -406,9 +457,6 @@ class BuildBase(Event):
 
     pkgs = property(getPkgs, setPkgs, None, "pkgs property")
     name = property(getName, setName, None, "name property")
-    cfg = property(getCfg, setCfg, None, "cfg property")
-
-
 
 class BuildInit(BuildBase):
     """buildFile or buildTargets was invoked"""
@@ -417,13 +465,13 @@ class BuildInit(BuildBase):
         BuildBase.__init__(self, name, p)
 
 class BuildStarted(BuildBase, OperationStarted):
-    """bbmake build run started"""
+    """Event when builds start"""
     def __init__(self, n, p, failures = 0):
         OperationStarted.__init__(self, "Building Started")
         BuildBase.__init__(self, n, p, failures)
 
 class BuildCompleted(BuildBase, OperationCompleted):
-    """bbmake build run completed"""
+    """Event when builds have completed"""
     def __init__(self, total, n, p, failures=0, interrupted=0):
         if not failures:
             OperationCompleted.__init__(self, total, "Building Succeeded")
@@ -441,6 +489,23 @@ class DiskFull(Event):
         self._free = freespace
         self._mountpoint = mountpoint
 
+class DiskUsageSample:
+    def __init__(self, available_bytes, free_bytes, total_bytes):
+        # Number of bytes available to non-root processes.
+        self.available_bytes = available_bytes
+        # Number of bytes available to root processes.
+        self.free_bytes = free_bytes
+        # Total capacity of the volume.
+        self.total_bytes = total_bytes
+
+class MonitorDiskEvent(Event):
+    """If BB_DISKMON_DIRS is set, then this event gets triggered each time disk space is checked.
+       Provides information about the devices being monitored."""
+    def __init__(self, disk_usage):
+        Event.__init__(self)
+        # hash of device root path -> DiskUsageSample
+        self.disk_usage = disk_usage
+
 class NoProvider(Event):
     """No Provider for an Event"""
 
@@ -458,6 +523,28 @@ class NoProvider(Event):
     def isRuntime(self):
         return self._runtime
 
+    def __str__(self):
+        msg = ''
+        if self._runtime:
+            r = "R"
+        else:
+            r = ""
+
+        extra = ''
+        if not self._reasons:
+            if self._close_matches:
+                extra = ". Close matches:\n  %s" % '\n  '.join(self._close_matches)
+
+        if self._dependees:
+            msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra)
+        else:
+            msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra)
+        if self._reasons:
+            for reason in self._reasons:
+                msg += '\n' + reason
+        return msg
+
+
 class MultipleProviders(Event):
     """Multiple Providers"""
 
@@ -485,6 +572,16 @@ class MultipleProviders(Event):
         """
         return self._candidates
 
+    def __str__(self):
+        msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "",
+                            self._item,
+                            ", ".join(self._candidates))
+        rtime = ""
+        if self._is_runtime:
+            rtime = "R"
+        msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item)
+        return msg
+
 class ParseStarted(OperationStarted):
     """Recipe parsing for the runqueue has begun"""
     def __init__(self, total):
@@ -578,14 +675,6 @@ class FilesMatchingFound(Event):
         self._pattern = pattern
         self._matches = matches
 
-class CoreBaseFilesFound(Event):
-    """
-    Event when a list of appropriate config files has been generated
-    """
-    def __init__(self, paths):
-        Event.__init__(self)
-        self._paths = paths
-
 class ConfigFilesFound(Event):
     """
     Event when a list of appropriate config files has been generated
@@ -656,19 +745,6 @@ class LogHandler(logging.Handler):
         record.taskpid = worker_pid
         return True
 
-class RequestPackageInfo(Event):
-    """
-    Event to request package information
-    """
-
-class PackageInfo(Event):
-    """
-    Package information for GUI
-    """
-    def __init__(self, pkginfolist):
-        Event.__init__(self)
-        self._pkginfolist = pkginfolist
-
 class MetadataEvent(Event):
     """
     Generic event targeted at OE-Core classes
@@ -746,3 +822,10 @@ class NetworkTestFailed(Event):
     Event to indicate network test has failed
     """
 
+class FindSigInfoResult(Event):
+    """
+    Event to return results from findSigInfo command
+    """
+    def __init__(self, result):
+        Event.__init__(self)
+        self.result = result
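For consumers, the new HeartbeatEvent and MonitorDiskEvent are subscribed to like any other event class. A hedged sketch using the existing bb.event.register() API (handler names are invented; in this BitBake version registered handlers receive just the event object):

import bb.event

def on_heartbeat(e):
    # fires at roughly 10 second intervals, independent of task activity
    print("heartbeat at", e.time)

def on_disk_sample(e):
    # e.disk_usage maps a monitored device root path to a DiskUsageSample
    for dev, usage in e.disk_usage.items():
        print(dev, usage.available_bytes, "bytes available")

bb.event.register("on_heartbeat", on_heartbeat, mask=["bb.event.HeartbeatEvent"])
bb.event.register("on_disk_sample", on_disk_sample, mask=["bb.event.MonitorDiskEvent"])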

+ 219 - 143
bitbake/lib/bb/fetch2/__init__.py

@@ -35,10 +35,11 @@ import operator
 import collections
 import subprocess
 import pickle
+import errno
 import bb.persist_data, bb.utils
 import bb.checksum
-from bb import data
 import bb.process
+import bb.event
 
 __version__ = "2"
 _checksum_cache = bb.checksum.FileChecksumCache()
@@ -48,11 +49,11 @@ logger = logging.getLogger("BitBake.Fetcher")
 class BBFetchException(Exception):
     """Class all fetch exceptions inherit from"""
     def __init__(self, message):
-         self.msg = message
-         Exception.__init__(self, message)
+        self.msg = message
+        Exception.__init__(self, message)
 
     def __str__(self):
-         return self.msg
+        return self.msg
 
 class UntrustedUrl(BBFetchException):
     """Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS"""
@@ -68,24 +69,24 @@ class UntrustedUrl(BBFetchException):
 class MalformedUrl(BBFetchException):
     """Exception raised when encountering an invalid url"""
     def __init__(self, url, message=''):
-         if message:
-             msg = message
-         else:
-             msg = "The URL: '%s' is invalid and cannot be interpreted" % url
-         self.url = url
-         BBFetchException.__init__(self, msg)
-         self.args = (url,)
+        if message:
+            msg = message
+        else:
+            msg = "The URL: '%s' is invalid and cannot be interpreted" % url
+        self.url = url
+        BBFetchException.__init__(self, msg)
+        self.args = (url,)
 
 class FetchError(BBFetchException):
     """General fetcher exception when something happens incorrectly"""
     def __init__(self, message, url = None):
-         if url:
+        if url:
             msg = "Fetcher failure for URL: '%s'. %s" % (url, message)
-         else:
+        else:
             msg = "Fetcher failure: %s" % message
-         self.url = url
-         BBFetchException.__init__(self, msg)
-         self.args = (message, url)
+        self.url = url
+        BBFetchException.__init__(self, msg)
+        self.args = (message, url)
 
 class ChecksumError(FetchError):
     """Exception when mismatched checksum encountered"""
@@ -99,49 +100,56 @@ class NoChecksumError(FetchError):
 class UnpackError(BBFetchException):
     """General fetcher exception when something happens incorrectly when unpacking"""
     def __init__(self, message, url):
-         msg = "Unpack failure for URL: '%s'. %s" % (url, message)
-         self.url = url
-         BBFetchException.__init__(self, msg)
-         self.args = (message, url)
+        msg = "Unpack failure for URL: '%s'. %s" % (url, message)
+        self.url = url
+        BBFetchException.__init__(self, msg)
+        self.args = (message, url)
 
 class NoMethodError(BBFetchException):
     """Exception raised when there is no method to obtain a supplied url or set of urls"""
     def __init__(self, url):
-         msg = "Could not find a fetcher which supports the URL: '%s'" % url
-         self.url = url
-         BBFetchException.__init__(self, msg)
-         self.args = (url,)
+        msg = "Could not find a fetcher which supports the URL: '%s'" % url
+        self.url = url
+        BBFetchException.__init__(self, msg)
+        self.args = (url,)
 
 class MissingParameterError(BBFetchException):
     """Exception raised when a fetch method is missing a critical parameter in the url"""
     def __init__(self, missing, url):
-         msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing)
-         self.url = url
-         self.missing = missing
-         BBFetchException.__init__(self, msg)
-         self.args = (missing, url)
+        msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing)
+        self.url = url
+        self.missing = missing
+        BBFetchException.__init__(self, msg)
+        self.args = (missing, url)
 
 class ParameterError(BBFetchException):
     """Exception raised when a url cannot be proccessed due to invalid parameters."""
     def __init__(self, message, url):
-         msg = "URL: '%s' has invalid parameters. %s" % (url, message)
-         self.url = url
-         BBFetchException.__init__(self, msg)
-         self.args = (message, url)
+        msg = "URL: '%s' has invalid parameters. %s" % (url, message)
+        self.url = url
+        BBFetchException.__init__(self, msg)
+        self.args = (message, url)
 
 class NetworkAccess(BBFetchException):
     """Exception raised when network access is disabled but it is required."""
     def __init__(self, url, cmd):
-         msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url)
-         self.url = url
-         self.cmd = cmd
-         BBFetchException.__init__(self, msg)
-         self.args = (url, cmd)
+        msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url)
+        self.url = url
+        self.cmd = cmd
+        BBFetchException.__init__(self, msg)
+        self.args = (url, cmd)
 
 class NonLocalMethod(Exception):
     def __init__(self):
         Exception.__init__(self)
 
+class MissingChecksumEvent(bb.event.Event):
+    def __init__(self, url, md5sum, sha256sum):
+        self.url = url
+        self.checksums = {'md5sum': md5sum,
+                          'sha256sum': sha256sum}
+        bb.event.Event.__init__(self)
+
 
 class URI(object):
     """
@@ -355,7 +363,7 @@ def decodeurl(url):
     user, password, parameters).
     """
 
-    m = re.compile('(?P<type>[^:]*)://((?P<user>[^/]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
+    m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
     if not m:
         raise MalformedUrl(url)
 
@@ -403,8 +411,6 @@ def encodeurl(decoded):
 
     type, host, path, user, pswd, p = decoded
 
-    if not path:
-        raise MissingParameterError('path', "encoded from the data %s" % str(decoded))
     if not type:
         raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
     url = '%s://' % type
@@ -415,17 +421,18 @@ def encodeurl(decoded):
         url += "@"
     if host and type != "file":
         url += "%s" % host
-    # Standardise path to ensure comparisons work
-    while '//' in path:
-        path = path.replace("//", "/")
-    url += "%s" % urllib.parse.quote(path)
+    if path:
+        # Standardise path to ensure comparisons work
+        while '//' in path:
+            path = path.replace("//", "/")
+        url += "%s" % urllib.parse.quote(path)
     if p:
         for parm in p:
             url += ";%s=%s" % (parm, p[parm])
 
     return url
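Two practical effects of the URL handling changes, shown with an invented npm-style URL (to be run inside a BitBake environment): the user group can no longer swallow a ';', so an '@' inside a parameter value such as a scoped package name now parses cleanly, and encodeurl() accepts a URL with no path component.

from bb.fetch2 import decodeurl, encodeurl

t, host, path, user, pswd, parm = decodeurl(
    "npm://registry.npmjs.org;name=@angular/core;version=4.0.0")
print(host, parm)  # registry.npmjs.org {'name': '@angular/core', 'version': '4.0.0'}
# user/pswd come back empty rather than absorbing ';name=' as before

print(encodeurl(("https", "example.com", "", "", "", {})))   # https://example.com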
 
-def uri_replace(ud, uri_find, uri_replace, replacements, d):
+def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
     if not ud.url or not uri_find or not uri_replace:
         logger.error("uri_replace: passed an undefined value, not replacing")
         return None
@@ -455,7 +462,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d):
                 result_decoded[loc][k] = uri_replace_decoded[loc][k]
         elif (re.match(regexp, uri_decoded[loc])):
             if not uri_replace_decoded[loc]:
-                result_decoded[loc] = ""    
+                result_decoded[loc] = ""
             else:
                 for k in replacements:
                     uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])
@@ -464,9 +471,9 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d):
             if loc == 2:
                 # Handle path manipulations
                 basename = None
-                if uri_decoded[0] != uri_replace_decoded[0] and ud.mirrortarball:
+                if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball:
                     # If the source and destination url types differ, must be a mirrortarball mapping
-                    basename = os.path.basename(ud.mirrortarball)
+                    basename = os.path.basename(mirrortarball)
                     # Kill parameters, they make no sense for mirror tarballs
                     uri_decoded[5] = {}
                 elif ud.localpath and ud.method.supports_checksum(ud):
@@ -491,7 +498,7 @@ def fetcher_init(d):
     Calls before this must not hit the cache.
     """
     # When to drop SCM head revisions controlled by user policy
-    srcrev_policy = d.getVar('BB_SRCREV_POLICY', True) or "clear"
+    srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
     if srcrev_policy == "cache":
         logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
     elif srcrev_policy == "clear":
@@ -537,7 +544,11 @@ def fetcher_compare_revisions():
     return False
 
 def mirror_from_string(data):
-    return [ i.split() for i in (data or "").replace('\\n','\n').split('\n') if i ]
+    mirrors = (data or "").replace('\\n',' ').split()
+    # Split into pairs
+    if len(mirrors) % 2 != 0:
+        bb.warn('Invalid mirror data %s, should have paired members.' % data)
+    return list(zip(*[iter(mirrors)]*2))
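The rewritten mirror_from_string() splits the raw mirror data into whitespace-separated tokens and pairs them up with a shared iterator, warning when a token is left over. A standalone check (mirror URLs invented; print stands in for bb.warn):

def mirror_from_string(data):
    mirrors = (data or "").replace('\\n', ' ').split()
    if len(mirrors) % 2 != 0:
        print('Invalid mirror data %s, should have paired members.' % data)
    return list(zip(*[iter(mirrors)]*2))

print(mirror_from_string(
    "http://example.com/src/ http://mirror.example.org/src/ \\n "
    "git://.*/.* http://mirror.example.org/gitshadow/"))
# [('http://example.com/src/', 'http://mirror.example.org/src/'),
#  ('git://.*/.*', 'http://mirror.example.org/gitshadow/')]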
 
 def verify_checksum(ud, d, precomputed={}):
     """
@@ -572,7 +583,7 @@ def verify_checksum(ud, d, precomputed={}):
 
     if ud.method.recommends_checksum(ud) and not ud.md5_expected and not ud.sha256_expected:
         # If strict checking enabled and neither sum defined, raise error
-        strict = d.getVar("BB_STRICT_CHECKSUM", True) or "0"
+        strict = d.getVar("BB_STRICT_CHECKSUM") or "0"
         if strict == "1":
             logger.error('No checksum specified for %s, please add at least one to the recipe:\n'
                              'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' %
@@ -580,6 +591,14 @@ def verify_checksum(ud, d, precomputed={}):
                               ud.sha256_name, sha256data))
             raise NoChecksumError('Missing SRC_URI checksum', ud.url)
 
+        bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d)
+
+        if strict == "ignore":
+            return {
+                _MD5_KEY: md5data,
+                _SHA256_KEY: sha256data
+            }
+
         # Log missing sums so user can more easily add them
         logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n'
                        'SRC_URI[%s] = "%s"',
@@ -621,7 +640,7 @@ def verify_donestamp(ud, d, origud=None):
     Returns True, if the donestamp exists and is valid, False otherwise. When
     returning False, any existing done stamps are removed.
     """
-    if not ud.needdonestamp:
+    if not ud.needdonestamp or (origud and not origud.needdonestamp):
         return True
 
     if not os.path.exists(ud.donestamp):
@@ -718,18 +737,18 @@ def subprocess_setup():
 
 def get_autorev(d):
     #  only not cache src rev in autorev case
-    if d.getVar('BB_SRCREV_POLICY', True) != "cache":
+    if d.getVar('BB_SRCREV_POLICY') != "cache":
         d.setVar('BB_DONT_CACHE', '1')
     return "AUTOINC"
 
 def get_srcrev(d, method_name='sortable_revision'):
     """
-    Return the revsion string, usually for use in the version string (PV) of the current package
+    Return the revision string, usually for use in the version string (PV) of the current package
     Most packages usually only have one SCM so we just pass on the call.
     In the multi SCM case, we build a value based on SRCREV_FORMAT which must
     have been set.
 
-    The idea here is that we put the string "AUTOINC+" into return value if the revisions are not 
+    The idea here is that we put the string "AUTOINC+" into return value if the revisions are not
     incremental, other code is then responsible for turning that into an increasing value (if needed)
 
     A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if
@@ -737,7 +756,7 @@ def get_srcrev(d, method_name='sortable_revision'):
     """
 
     scms = []
-    fetcher = Fetch(d.getVar('SRC_URI', True).split(), d)
+    fetcher = Fetch(d.getVar('SRC_URI').split(), d)
     urldata = fetcher.ud
     for u in urldata:
         if urldata[u].method.supports_srcrev():
@@ -757,7 +776,7 @@ def get_srcrev(d, method_name='sortable_revision'):
     #
     # Multiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
     #
-    format = d.getVar('SRCREV_FORMAT', True)
+    format = d.getVar('SRCREV_FORMAT')
     if not format:
         raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.")
 
@@ -781,7 +800,7 @@ def get_srcrev(d, method_name='sortable_revision'):
     format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format)
 
     if seenautoinc:
-       format = "AUTOINC+" + format
+        format = "AUTOINC+" + format
 
     return format
 
@@ -819,12 +838,24 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
     if not cleanup:
         cleanup = []
 
+    # If PATH contains WORKDIR which contains PV which contains SRCPV we
+    # can end up in circular recursion here so give the option of breaking it
+    # in a data store copy.
+    try:
+        d.getVar("PV")
+    except bb.data_smart.ExpansionError:
+        d = bb.data.createCopy(d)
+        d.setVar("PV", "fetcheravoidrecurse")
+
     origenv = d.getVar("BB_ORIGENV", False)
     for var in exportvars:
-        val = d.getVar(var, True) or (origenv and origenv.getVar(var, True))
+        val = d.getVar(var) or (origenv and origenv.getVar(var))
         if val:
             cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
 
+    # Disable pseudo as it may affect ssh, potentially causing it to hang.
+    cmd = 'export PSEUDO_DISABLED=1; ' + cmd
+
     logger.debug(1, "Running %s", cmd)
 
     success = False
@@ -856,12 +887,15 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
 
     return output
 
-def check_network_access(d, info = "", url = None):
+def check_network_access(d, info, url):
     """
-    log remote network access, and error if BB_NO_NETWORK is set
+    log remote network access, and error if BB_NO_NETWORK is set or the given
+    URI is untrusted
     """
-    if d.getVar("BB_NO_NETWORK", True) == "1":
+    if d.getVar("BB_NO_NETWORK") == "1":
         raise NetworkAccess(url, info)
+    elif not trusted_network(d, url):
+        raise UntrustedUrl(url, info)
     else:
         logger.debug(1, "Fetcher accessed the network with the command %s" % info)
 
@@ -876,45 +910,47 @@ def build_mirroruris(origud, mirrors, ld):
     replacements["BASENAME"] = origud.path.split("/")[-1]
     replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.')
 
-    def adduri(ud, uris, uds, mirrors):
+    def adduri(ud, uris, uds, mirrors, tarballs):
         for line in mirrors:
             try:
                 (find, replace) = line
             except ValueError:
                 continue
-            newuri = uri_replace(ud, find, replace, replacements, ld)
-            if not newuri or newuri in uris or newuri == origud.url:
-                continue
 
-            if not trusted_network(ld, newuri):
-                logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" %  (newuri))
-                continue
+            for tarball in tarballs:
+                newuri = uri_replace(ud, find, replace, replacements, ld, tarball)
+                if not newuri or newuri in uris or newuri == origud.url:
+                    continue
 
-            # Create a local copy of the mirrors minus the current line
-            # this will prevent us from recursively processing the same line
-            # as well as indirect recursion A -> B -> C -> A
-            localmirrors = list(mirrors)
-            localmirrors.remove(line)
+                if not trusted_network(ld, newuri):
+                    logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" %  (newuri))
+                    continue
+
+                # Create a local copy of the mirrors minus the current line
+                # this will prevent us from recursively processing the same line
+                # as well as indirect recursion A -> B -> C -> A
+                localmirrors = list(mirrors)
+                localmirrors.remove(line)
 
-            try:
-                newud = FetchData(newuri, ld)
-                newud.setup_localpath(ld)
-            except bb.fetch2.BBFetchException as e:
-                logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
-                logger.debug(1, str(e))
                 try:
-                    # setup_localpath of file:// urls may fail, we should still see 
-                    # if mirrors of the url exist
-                    adduri(newud, uris, uds, localmirrors)
-                except UnboundLocalError:
-                    pass
-                continue   
-            uris.append(newuri)
-            uds.append(newud)
+                    newud = FetchData(newuri, ld)
+                    newud.setup_localpath(ld)
+                except bb.fetch2.BBFetchException as e:
+                    logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
+                    logger.debug(1, str(e))
+                    try:
+                        # setup_localpath of file:// urls may fail, we should still see
+                        # if mirrors of the url exist
+                        adduri(newud, uris, uds, localmirrors, tarballs)
+                    except UnboundLocalError:
+                        pass
+                    continue
+                uris.append(newuri)
+                uds.append(newud)
 
-            adduri(newud, uris, uds, localmirrors)
+                adduri(newud, uris, uds, localmirrors, tarballs)
 
-    adduri(origud, uris, uds, mirrors)
+    adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None])
 
     return uris, uds
 
@@ -958,19 +994,26 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
 
         # We may be obtaining a mirror tarball which needs further processing by the real fetcher
         # If that tarball is a local file:// we need to provide a symlink to it
-        dldir = ld.getVar("DL_DIR", True)
-        if origud.mirrortarball and os.path.basename(ud.localpath) == os.path.basename(origud.mirrortarball) \
-                and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
+        dldir = ld.getVar("DL_DIR")
+
+        if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
             # Create donestamp in old format to avoid triggering a re-download
             if ud.donestamp:
                 bb.utils.mkdirhier(os.path.dirname(ud.donestamp))
                 open(ud.donestamp, 'w').close()
             dest = os.path.join(dldir, os.path.basename(ud.localpath))
             if not os.path.exists(dest):
-                os.symlink(ud.localpath, dest)
+                # In case this is executing without any file locks held (as is
+                # the case for file:// URLs), two tasks may end up here at the
+                # same time, in which case we do not want the second task to
+                # fail when the link has already been created by the first task.
+                try:
+                    os.symlink(ud.localpath, dest)
+                except FileExistsError:
+                    pass
             if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld):
                 origud.method.download(origud, ld)
-                if hasattr(origud.method,"build_mirror_data"):
+                if hasattr(origud.method, "build_mirror_data"):
                     origud.method.build_mirror_data(origud, ld)
             return origud.localpath
         # Otherwise the result is a local file:// and we symlink to it
@@ -979,13 +1022,23 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
                 # Broken symbolic link
                 os.unlink(origud.localpath)
 
-            os.symlink(ud.localpath, origud.localpath)
+            # As per above, in case two tasks end up here simultaneously.
+            try:
+                os.symlink(ud.localpath, origud.localpath)
+            except FileExistsError:
+                pass
         update_stamp(origud, ld)
         return ud.localpath
 
     except bb.fetch2.NetworkAccess:
         raise
 
+    except IOError as e:
+        if e.errno in [os.errno.ESTALE]:
+            logger.warning("Stale Error Observed %s." % ud.url)
+            return False
+        raise
+
     except bb.fetch2.BBFetchException as e:
         if isinstance(e, ChecksumError):
             logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." % (ud.url, origud.url))
@@ -1032,14 +1085,14 @@ def trusted_network(d, url):
     BB_ALLOWED_NETWORKS is set globally or for a specific recipe.
     Note: modifies SRC_URI & mirrors.
     """
-    if d.getVar('BB_NO_NETWORK', True) == "1":
+    if d.getVar('BB_NO_NETWORK') == "1":
         return True
 
     pkgname = d.expand(d.getVar('PN', False))
     trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
 
     if not trusted_hosts:
-        trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS', True)
+        trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS')
 
     # Not enabled.
     if not trusted_hosts:
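For reference, the lookup order this hunk preserves: a recipe-specific BB_ALLOWED_NETWORKS varflag takes precedence over the global value. A condensed sketch, with the function name invented:

def allowed_networks(d, pkgname):
    # Per-recipe flag first, then the global BB_ALLOWED_NETWORKS value.
    hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
    if not hosts:
        hosts = d.getVar('BB_ALLOWED_NETWORKS')
    return (hosts or "").split()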
@@ -1071,7 +1124,7 @@ def srcrev_internal_helper(ud, d, name):
     """
 
     srcrev = None
-    pn = d.getVar("PN", True)
+    pn = d.getVar("PN")
     attempts = []
     if name != '' and pn:
         attempts.append("SRCREV_%s_pn-%s" % (name, pn))
@@ -1082,7 +1135,7 @@ def srcrev_internal_helper(ud, d, name):
     attempts.append("SRCREV")
 
     for a in attempts:
-        srcrev = d.getVar(a, True)              
+        srcrev = d.getVar(a)
         if srcrev and srcrev != "INVALID":
             break
 
@@ -1097,7 +1150,7 @@ def srcrev_internal_helper(ud, d, name):
         if srcrev == "INVALID" or not srcrev:
             return parmrev
         if srcrev != parmrev:
-            raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please spcify one valid value" % (srcrev, parmrev))
+            raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev))
         return parmrev
 
     if srcrev == "INVALID" or not srcrev:
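The attempt list is partly elided by the hunk context; in upstream the middle entries follow the same SRCREV_&lt;name&gt;/SRCREV_pn-&lt;pn&gt; pattern. A dict-backed illustration of the precedence, most specific key first:

def pick_srcrev(vars_, name, pn):
    attempts = []
    if name and pn:
        attempts.append("SRCREV_%s_pn-%s" % (name, pn))
    if name:
        attempts.append("SRCREV_%s" % name)
    if pn:
        attempts.append("SRCREV_pn-%s" % pn)
    attempts.append("SRCREV")
    for a in attempts:
        srcrev = vars_.get(a)
        if srcrev and srcrev != "INVALID":
            return srcrev
    return None

# The name-specific override wins over the plain SRCREV:
assert pick_srcrev({"SRCREV": "aaa", "SRCREV_meta": "bbb"}, "meta", "mypkg") == "bbb"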
@@ -1115,7 +1168,7 @@ def get_checksum_file_list(d):
     """
     fetch = Fetch([], d, cache = False, localonly = True)
 
-    dl_dir = d.getVar('DL_DIR', True)
+    dl_dir = d.getVar('DL_DIR')
     filelist = []
     for u in fetch.urls:
         ud = fetch.ud[u]
@@ -1129,9 +1182,9 @@ def get_checksum_file_list(d):
                 if f.startswith(dl_dir):
                     # The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else
                     if os.path.exists(f):
-                        bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN', True), os.path.basename(f)))
+                        bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f)))
                     else:
-                        bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN', True), os.path.basename(f)))
+                        bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f)))
                 filelist.append(f + ":" + str(os.path.exists(f)))
 
     return " ".join(filelist)
@@ -1157,10 +1210,10 @@ class FetchData(object):
         self.localfile = ""
         self.localpath = None
         self.lockfile = None
-        self.mirrortarball = None
+        self.mirrortarballs = []
         self.basename = None
         self.basepath = None
-        (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(data.expand(url, d))
+        (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url))
         self.date = self.getSRCDate(d)
         self.url = url
         if not self.user and "user" in self.parm:
@@ -1177,16 +1230,16 @@ class FetchData(object):
             self.sha256_name = "sha256sum"
         if self.md5_name in self.parm:
             self.md5_expected = self.parm[self.md5_name]
-        elif self.type not in ["http", "https", "ftp", "ftps", "sftp"]:
+        elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
             self.md5_expected = None
         else:
-            self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name, True)
+            self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name)
         if self.sha256_name in self.parm:
             self.sha256_expected = self.parm[self.sha256_name]
-        elif self.type not in ["http", "https", "ftp", "ftps", "sftp"]:
+        elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
             self.sha256_expected = None
         else:
-            self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name, True)
+            self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name)
         self.ignore_checksums = False
 
         self.names = self.parm.get("name",'default').split(',')
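The checksum expectation now treats s3 like the other remote types. A dependency-free sketch of the three-way resolution above (URL parameter, then URL type, then the SRC_URI varflag):

def expected_checksum(parm, getflag, urltype, name="sha256sum"):
    remote_types = ("http", "https", "ftp", "ftps", "sftp", "s3")
    if name in parm:                  # ;sha256sum=... on the SRC_URI entry
        return parm[name]
    if urltype not in remote_types:   # local and VCS urls carry no checksum
        return None
    return getflag("SRC_URI", name)   # SRC_URI[sha256sum] = "..." flag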
@@ -1195,7 +1248,7 @@ class FetchData(object):
         for m in methods:
             if m.supports(self, d):
                 self.method = m
-                break                
+                break
 
         if not self.method:
             raise NoMethodError(url)
@@ -1204,7 +1257,7 @@ class FetchData(object):
             raise NonLocalMethod()
 
         if self.parm.get("proto", None) and "protocol" not in self.parm:
-            logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN', True))
+            logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN'))
             self.parm["protocol"] = self.parm.get("proto", None)
 
         if hasattr(self.method, "urldata_init"):
@@ -1217,7 +1270,7 @@ class FetchData(object):
         elif self.localfile:
             self.localpath = self.method.localpath(self, d)
 
-        dldir = d.getVar("DL_DIR", True)
+        dldir = d.getVar("DL_DIR")
 
         if not self.needdonestamp:
             return
@@ -1230,12 +1283,12 @@ class FetchData(object):
         elif self.basepath or self.basename:
             basepath = dldir + os.sep + (self.basepath or self.basename)
         else:
-             bb.fatal("Can't determine lock path for url %s" % url)
+            bb.fatal("Can't determine lock path for url %s" % url)
 
         self.donestamp = basepath + '.done'
         self.lockfile = basepath + '.lock'
 
-    def setup_revisons(self, d):
+    def setup_revisions(self, d):
         self.revisions = {}
         for name in self.names:
             self.revisions[name] = srcrev_internal_helper(self, d, name)
@@ -1257,12 +1310,12 @@ class FetchData(object):
         if "srcdate" in self.parm:
             return self.parm['srcdate']
 
-        pn = d.getVar("PN", True)
+        pn = d.getVar("PN")
 
         if pn:
-            return d.getVar("SRCDATE_%s" % pn, True) or d.getVar("SRCDATE", True) or d.getVar("DATE", True)
+            return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE")
 
-        return d.getVar("SRCDATE", True) or d.getVar("DATE", True)
+        return d.getVar("SRCDATE") or d.getVar("DATE")
 
 class FetchMethod(object):
     """Base class for 'fetch'ing data"""
@@ -1282,7 +1335,7 @@ class FetchMethod(object):
         Can also setup variables in urldata for use in go (saving code duplication
         and duplicate code execution)
         """
-        return os.path.join(data.getVar("DL_DIR", d, True), urldata.localfile)
+        return os.path.join(d.getVar("DL_DIR"), urldata.localfile)
 
     def supports_checksum(self, urldata):
         """
@@ -1293,13 +1346,13 @@ class FetchMethod(object):
         if os.path.isdir(urldata.localpath) == True:
             return False
         if urldata.localpath.find("*") != -1:
-             return False
+            return False
 
         return True
 
     def recommends_checksum(self, urldata):
         """
-        Is the backend on where checksumming is recommended (should warnings 
+        Is the backend one where checksumming is recommended (should warnings
         be displayed if there is no checksum)?
         """
         return False
@@ -1374,7 +1427,7 @@ class FetchMethod(object):
                 cmd = 'gzip -dc %s > %s' % (file, efile)
             elif file.endswith('.bz2'):
                 cmd = 'bzip2 -dc %s > %s' % (file, efile)
-            elif file.endswith('.tar.xz'):
+            elif file.endswith('.txz') or file.endswith('.tar.xz'):
                 cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
             elif file.endswith('.xz'):
                 cmd = 'xz -dc %s > %s' % (file, efile)
@@ -1382,6 +1435,10 @@ class FetchMethod(object):
                 cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file
             elif file.endswith('.lz'):
                 cmd = 'lzip -dc %s > %s' % (file, efile)
+            elif file.endswith('.tar.7z'):
+                cmd = '7z x -so %s | tar x --no-same-owner -f -' % file
+            elif file.endswith('.7z'):
+                cmd = '7za x -y %s 1>/dev/null' % file
             elif file.endswith('.zip') or file.endswith('.jar'):
                 try:
                     dos = bb.utils.to_boolean(urldata.parm.get('dos'), False)
@@ -1413,10 +1470,6 @@ class FetchMethod(object):
                 else:
                     raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url)
                 cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile)
-            elif file.endswith('.tar.7z'):
-                cmd = '7z x -so %s | tar xf - ' % file
-            elif file.endswith('.7z'):
-                cmd = '7za x -y %s 1>/dev/null' % file
 
         # If 'subdir' param exists, create a dir and use it as destination for unpack cmd
         if 'subdir' in urldata.parm:
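Net effect of the archive-command reshuffle: .txz joins the .tar.xz pipeline, and .tar.7z is now extracted through tar with --no-same-owner before the generic .7z case can match. Condensed, with the commands copied verbatim:

def unpack_command(file):
    # Order matters: the more specific '.tar.7z' must match before '.7z'.
    if file.endswith(('.txz', '.tar.xz')):
        return 'xz -dc %s | tar x --no-same-owner -f -' % file
    if file.endswith('.tar.7z'):
        return '7z x -so %s | tar x --no-same-owner -f -' % file
    if file.endswith('.7z'):
        return '7za x -y %s 1>/dev/null' % file
    return None  # remaining suffixes are handled as before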
@@ -1450,7 +1503,7 @@ class FetchMethod(object):
         if not cmd:
             return
 
-        path = data.getVar('PATH', True)
+        path = data.getVar('PATH')
         if path:
             cmd = "PATH=\"%s\" %s" % (path, cmd)
         bb.note("Unpacking %s to %s/" % (file, unpackdir))
@@ -1507,7 +1560,15 @@ class FetchMethod(object):
 
     def generate_revision_key(self, ud, d, name):
         key = self._revision_key(ud, d, name)
-        return "%s-%s" % (key, d.getVar("PN", True) or "")
+        return "%s-%s" % (key, d.getVar("PN") or "")
+
+    def latest_versionstring(self, ud, d):
+        """
+        Compute the latest release name like "x.y.z" in "x.y.z+gitHASH"
+        by searching through the tags output of ls-remote, comparing
+        versions and returning the highest match as a (version, revision) pair.
+        """
+        return ('', '')
 
 class Fetch(object):
     def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
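latest_versionstring is a new hook that defaults to "no answer"; fetchers such as git override it. A toy standalone illustration of the contract, not any fetcher's real implementation:

import re

def latest_from_tags(tags, pattern=r"(?P<pver>\d+(?:\.\d+)+)"):
    # tags: iterable of (sha, refname) pairs, e.g. parsed ls-remote output.
    best_key, best = (), ('', '')
    for sha, ref in tags:
        m = re.search(pattern, ref)
        if not m:
            continue
        key = tuple(int(p) for p in m.group('pver').split('.'))
        if key > best_key:
            best_key, best = key, (m.group('pver'), sha)
    return best

assert latest_from_tags([('a' * 7, 'refs/tags/1.2'),
                         ('b' * 7, 'refs/tags/1.10')]) == ('1.10', 'b' * 7)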
@@ -1515,14 +1576,14 @@ class Fetch(object):
             raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time")
 
         if len(urls) == 0:
-            urls = d.getVar("SRC_URI", True).split()
+            urls = d.getVar("SRC_URI").split()
         self.urls = urls
         self.d = d
         self.ud = {}
         self.connection_cache = connection_cache
 
-        fn = d.getVar('FILE', True)
-        mc = d.getVar('__BBMULTICONFIG', True) or ""
+        fn = d.getVar('FILE')
+        mc = d.getVar('__BBMULTICONFIG') or ""
         if cache and fn and mc + fn in urldata_cache:
             self.ud = urldata_cache[mc + fn]
 
@@ -1565,8 +1626,8 @@ class Fetch(object):
         if not urls:
             urls = self.urls
 
-        network = self.d.getVar("BB_NO_NETWORK", True)
-        premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY", True) == "1")
+        network = self.d.getVar("BB_NO_NETWORK")
+        premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY") == "1")
 
         for u in urls:
             ud = self.ud[u]
@@ -1579,13 +1640,22 @@ class Fetch(object):
 
             try:
                 self.d.setVar("BB_NO_NETWORK", network)
- 
+
                 if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
                     localpath = ud.localpath
                 elif m.try_premirror(ud, self.d):
                     logger.debug(1, "Trying PREMIRRORS")
-                    mirrors = mirror_from_string(self.d.getVar('PREMIRRORS', True))
+                    mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
                     localpath = try_mirrors(self, self.d, ud, mirrors, False)
+                    if localpath:
+                        try:
+                            # early checksum verification so that if the checksum of the premirror
+                            # contents mismatch the fetcher can still try upstream and mirrors
+                            update_stamp(ud, self.d)
+                        except ChecksumError as e:
+                            logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u)
+                            logger.debug(1, str(e))
+                            localpath = ""
 
                 if premirroronly:
                     self.d.setVar("BB_NO_NETWORK", "1")
@@ -1624,7 +1694,7 @@ class Fetch(object):
                         if not verified_stamp:
                             m.clean(ud, self.d)
                         logger.debug(1, "Trying MIRRORS")
-                        mirrors = mirror_from_string(self.d.getVar('MIRRORS', True))
+                        mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
                         localpath = try_mirrors(self, self.d, ud, mirrors)
 
                 if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1):
@@ -1634,6 +1704,11 @@ class Fetch(object):
 
                 update_stamp(ud, self.d)
 
+            except IOError as e:
+                if e.errno in [os.errno.ESTALE]:
+                    logger.error("Stale Error Observed %s." % u)
+                    raise ChecksumError("Stale Error Detected")
+
             except BBFetchException as e:
                 if isinstance(e, ChecksumError):
                     logger.error("Checksum failure fetching %s" % u)
@@ -1657,15 +1732,14 @@ class Fetch(object):
             m = ud.method
             logger.debug(1, "Testing URL %s", u)
             # First try checking uri, u, from PREMIRRORS
-            mirrors = mirror_from_string(self.d.getVar('PREMIRRORS', True))
+            mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
             ret = try_mirrors(self, self.d, ud, mirrors, True)
             if not ret:
                 # Next try checking from the original uri, u
-                try:
-                    ret = m.checkstatus(self, ud, self.d)
-                except:
+                ret = m.checkstatus(self, ud, self.d)
+                if not ret:
                     # Finally, try checking uri, u, from MIRRORS
-                    mirrors = mirror_from_string(self.d.getVar('MIRRORS', True))
+                    mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
                     ret = try_mirrors(self, self.d, ud, mirrors, True)
 
             if not ret:
@@ -1673,7 +1747,7 @@ class Fetch(object):
 
     def unpack(self, root, urls=None):
         """
-        Check all urls exist upstream
+        Unpack urls to root
         """
 
         if not urls:
@@ -1763,6 +1837,7 @@ from . import svn
 from . import wget
 from . import ssh
 from . import sftp
+from . import s3
 from . import perforce
 from . import bzr
 from . import hg
@@ -1780,6 +1855,7 @@ methods.append(gitannex.GitANNEX())
 methods.append(cvs.Cvs())
 methods.append(ssh.SSH())
 methods.append(sftp.SFTP())
+methods.append(s3.S3())
 methods.append(perforce.Perforce())
 methods.append(bzr.Bzr())
 methods.append(hg.Hg())

+ 4 - 5
bitbake/lib/bb/fetch2/bzr.py

@@ -27,7 +27,6 @@ import os
 import sys
 import logging
 import bb
-from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import runfetchcmd
@@ -43,14 +42,14 @@ class Bzr(FetchMethod):
         """
         # Create paths to bzr checkouts
         relpath = self._strip_leading_slashes(ud.path)
-        ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)
+        ud.pkgdir = os.path.join(d.expand('${BZRDIR}'), ud.host, relpath)
 
-        ud.setup_revisons(d)
+        ud.setup_revisions(d)
 
         if not ud.revision:
             ud.revision = self.latest_revision(ud, d)
 
-        ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)
+        ud.localfile = d.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision))
 
     def _buildbzrcommand(self, ud, d, command):
         """
@@ -58,7 +57,7 @@ class Bzr(FetchMethod):
         command is "fetch", "update", "revno"
         """
 
-        basecmd = data.expand('${FETCHCMD_bzr}', d)
+        basecmd = d.expand('${FETCHCMD_bzr}')
 
         proto =  ud.parm.get('protocol', 'http')
 

+ 5 - 6
bitbake/lib/bb/fetch2/clearcase.py

@@ -65,7 +65,6 @@ import os
 import sys
 import shutil
 import bb
-from   bb import data
 from   bb.fetch2 import FetchMethod
 from   bb.fetch2 import FetchError
 from   bb.fetch2 import runfetchcmd
@@ -108,13 +107,13 @@ class ClearCase(FetchMethod):
         else:
             ud.module = ""
 
-        ud.basecmd = d.getVar("FETCHCMD_ccrc", True) or spawn.find_executable("cleartool") or spawn.find_executable("rcleartool")
+        ud.basecmd = d.getVar("FETCHCMD_ccrc") or spawn.find_executable("cleartool") or spawn.find_executable("rcleartool")
 
-        if data.getVar("SRCREV", d, True) == "INVALID":
+        if d.getVar("SRCREV") == "INVALID":
           raise FetchError("Set a valid SRCREV for the clearcase fetcher in your recipe, e.g. SRCREV = \"/main/LATEST\" or any other label of your choice.")
 
         ud.label = d.getVar("SRCREV", False)
-        ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC", True)
+        ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC")
 
         ud.server     = "%s://%s%s" % (ud.proto, ud.host, ud.path)
 
@@ -124,7 +123,7 @@ class ClearCase(FetchMethod):
 
         ud.viewname         = "%s-view%s" % (ud.identifier, d.getVar("DATETIME"))
         ud.csname           = "%s-config-spec" % (ud.identifier)
-        ud.ccasedir         = os.path.join(data.getVar("DL_DIR", d, True), ud.type)
+        ud.ccasedir         = os.path.join(d.getVar("DL_DIR"), ud.type)
         ud.viewdir          = os.path.join(ud.ccasedir, ud.viewname)
         ud.configspecfile   = os.path.join(ud.ccasedir, ud.csname)
         ud.localfile        = "%s.tar.gz" % (ud.identifier)
@@ -144,7 +143,7 @@ class ClearCase(FetchMethod):
         self.debug("configspecfile  = %s" % ud.configspecfile)
         self.debug("localfile       = %s" % ud.localfile)
 
-        ud.localfile = os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
+        ud.localfile = os.path.join(d.getVar("DL_DIR"), ud.localfile)
 
     def _build_ccase_command(self, ud, command):
         """

+ 8 - 8
bitbake/lib/bb/fetch2/cvs.py

@@ -63,7 +63,7 @@ class Cvs(FetchMethod):
         if 'fullpath' in ud.parm:
             fullpath = '_fullpath'
 
-        ud.localfile = bb.data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)
+        ud.localfile = d.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath))
 
     def need_update(self, ud, d):
         if (ud.date == "now"):
@@ -87,10 +87,10 @@ class Cvs(FetchMethod):
             cvsroot = ud.path
         else:
             cvsroot = ":" + method
-            cvsproxyhost = d.getVar('CVS_PROXY_HOST', True)
+            cvsproxyhost = d.getVar('CVS_PROXY_HOST')
             if cvsproxyhost:
                 cvsroot += ";proxy=" + cvsproxyhost
-            cvsproxyport = d.getVar('CVS_PROXY_PORT', True)
+            cvsproxyport = d.getVar('CVS_PROXY_PORT')
             if cvsproxyport:
                 cvsroot += ";proxyport=" + cvsproxyport
             cvsroot += ":" + ud.user
@@ -110,7 +110,7 @@ class Cvs(FetchMethod):
         if ud.tag:
             options.append("-r %s" % ud.tag)
 
-        cvsbasecmd = d.getVar("FETCHCMD_cvs", True)
+        cvsbasecmd = d.getVar("FETCHCMD_cvs")
         cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module
         cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options)
 
@@ -120,8 +120,8 @@ class Cvs(FetchMethod):
 
         # create module directory
         logger.debug(2, "Fetch: checking for module directory")
-        pkg = d.getVar('PN', True)
-        pkgdir = os.path.join(d.getVar('CVSDIR', True), pkg)
+        pkg = d.getVar('PN')
+        pkgdir = os.path.join(d.getVar('CVSDIR'), pkg)
         moddir = os.path.join(pkgdir, localdir)
         workdir = None
         if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
@@ -164,8 +164,8 @@ class Cvs(FetchMethod):
     def clean(self, ud, d):
         """ Clean CVS Files and tarballs """
 
-        pkg = d.getVar('PN', True)
-        pkgdir = os.path.join(d.getVar("CVSDIR", True), pkg)
+        pkg = d.getVar('PN')
+        pkgdir = os.path.join(d.getVar("CVSDIR"), pkg)
 
         bb.utils.remove(pkgdir, True)
         bb.utils.remove(ud.localpath)

+ 210 - 39
bitbake/lib/bb/fetch2/git.py

@@ -50,7 +50,7 @@ Supported SRC_URI options are:
    The default is "0", set nobranch=1 if needed.
 
 - usehead
-   For local git:// urls to use the current branch HEAD as the revsion for use with
+   For local git:// urls to use the current branch HEAD as the revision for use with
    AUTOREV. Implies nobranch.
 
 """
@@ -70,13 +70,15 @@ Supported SRC_URI options are:
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
+import collections
 import errno
+import fnmatch
 import os
 import re
+import subprocess
+import tempfile
 import bb
-import errno
 import bb.progress
-from   bb    import data
 from   bb.fetch2 import FetchMethod
 from   bb.fetch2 import runfetchcmd
 from   bb.fetch2 import logger
@@ -173,20 +175,68 @@ class Git(FetchMethod):
         branches = ud.parm.get("branch", "master").split(',')
         if len(branches) != len(ud.names):
             raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
+
+        ud.cloneflags = "-s -n"
+        if ud.bareclone:
+            ud.cloneflags += " --mirror"
+
+        ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1"
+        ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split()
+
+        depth_default = d.getVar("BB_GIT_SHALLOW_DEPTH")
+        if depth_default is not None:
+            try:
+                depth_default = int(depth_default or 0)
+            except ValueError:
+                raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
+            else:
+                if depth_default < 0:
+                    raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
+        else:
+            depth_default = 1
+        ud.shallow_depths = collections.defaultdict(lambda: depth_default)
+
+        revs_default = d.getVar("BB_GIT_SHALLOW_REVS")
+        ud.shallow_revs = []
         ud.branches = {}
-        for name in ud.names:
-            branch = branches[ud.names.index(name)]
+        for pos, name in enumerate(ud.names):
+            branch = branches[pos]
             ud.branches[name] = branch
             ud.unresolvedrev[name] = branch
 
+            shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name)
+            if shallow_depth is not None:
+                try:
+                    shallow_depth = int(shallow_depth or 0)
+                except ValueError:
+                    raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
+                else:
+                    if shallow_depth < 0:
+                        raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
+                    ud.shallow_depths[name] = shallow_depth
+
+            revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name)
+            if revs is not None:
+                ud.shallow_revs.extend(revs.split())
+            elif revs_default is not None:
+                ud.shallow_revs.extend(revs_default.split())
+
+        if (ud.shallow and
+                not ud.shallow_revs and
+                all(ud.shallow_depths[n] == 0 for n in ud.names)):
+            # Shallow disabled for this URL
+            ud.shallow = False
+
         if ud.usehead:
             ud.unresolvedrev['default'] = 'HEAD'
 
-        ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git -c core.fsyncobjectfiles=0"
+        ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0"
 
-        ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0") or ud.rebaseable
+        write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
+        ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
+        ud.write_shallow_tarballs = (d.getVar("BB_GENERATE_SHALLOW_TARBALLS") or write_tarballs) != "0"
 
-        ud.setup_revisons(d)
+        ud.setup_revisions(d)
 
         for name in ud.names:
             # Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
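Per-name BB_GIT_SHALLOW_DEPTH_&lt;name&gt; values override the URL-wide BB_GIT_SHALLOW_DEPTH, with the default supplied through a defaultdict and the same range check applied to both. Condensed (validation abbreviated):

import collections

def resolve_shallow_depths(getvar, names, default=1):
    # Per-name depth falls back to the shared default; depth 0 disables
    # shallow handling for that name.
    depths = collections.defaultdict(lambda: default)
    for name in names:
        raw = getvar("BB_GIT_SHALLOW_DEPTH_%s" % name)
        if raw is not None:
            depth = int(raw or 0)
            if depth < 0:
                raise ValueError("Invalid depth: %s" % raw)
            depths[name] = depth
    return depths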
@@ -206,13 +256,42 @@ class Git(FetchMethod):
         if ud.rebaseable:
             for name in ud.names:
                 gitsrcname = gitsrcname + '_' + ud.revisions[name]
-        ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
-        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)
-        gitdir = d.getVar("GITDIR", True) or (d.getVar("DL_DIR", True) + "/git2/")
-        ud.clonedir = os.path.join(gitdir, gitsrcname)
 
+        dl_dir = d.getVar("DL_DIR")
+        gitdir = d.getVar("GITDIR") or (dl_dir + "/git2/")
+        ud.clonedir = os.path.join(gitdir, gitsrcname)
         ud.localfile = ud.clonedir
 
+        mirrortarball = 'git2_%s.tar.gz' % gitsrcname
+        ud.fullmirror = os.path.join(dl_dir, mirrortarball)
+        ud.mirrortarballs = [mirrortarball]
+        if ud.shallow:
+            tarballname = gitsrcname
+            if ud.bareclone:
+                tarballname = "%s_bare" % tarballname
+
+            if ud.shallow_revs:
+                tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs)))
+
+            for name, revision in sorted(ud.revisions.items()):
+                tarballname = "%s_%s" % (tarballname, ud.revisions[name][:7])
+                depth = ud.shallow_depths[name]
+                if depth:
+                    tarballname = "%s-%s" % (tarballname, depth)
+
+            shallow_refs = []
+            if not ud.nobranch:
+                shallow_refs.extend(ud.branches.values())
+            if ud.shallow_extra_refs:
+                shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs)
+            if shallow_refs:
+                tarballname = "%s_%s" % (tarballname, "_".join(sorted(shallow_refs)).replace('/', '.'))
+
+            fetcher = self.__class__.__name__.lower()
+            ud.shallowtarball = '%sshallow_%s.tar.gz' % (fetcher, tarballname)
+            ud.fullshallow = os.path.join(dl_dir, ud.shallowtarball)
+            ud.mirrortarballs.insert(0, ud.shallowtarball)
+
     def localpath(self, ud, d):
         return ud.clonedir
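
A worked example of the tarball naming above, with illustrative values; note the shallow tarball is inserted at the front of mirrortarballs, so it is preferred:

# Illustrative values only; one name/revision, non-bare, no extra refs:
gitsrcname = 'git.example.com.repo.git'
rev, depth, branch = 'deadbeefcafef00d', 1, 'master'
tarballname = '%s_%s-%d_%s' % (gitsrcname, rev[:7], depth, branch)
shallowtarball = 'gitshallow_%s.tar.gz' % tarballname
# -> 'gitshallow_git.example.com.repo.git_deadbee-1_master.tar.gz'
# mirrortarballs = [shallowtarball, 'git2_%s.tar.gz' % gitsrcname]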
 
@@ -222,6 +301,8 @@ class Git(FetchMethod):
         for name in ud.names:
             if not self._contains_ref(ud, d, name, ud.clonedir):
                 return True
+        if ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow):
+            return True
         if ud.write_tarballs and not os.path.exists(ud.fullmirror):
             return True
         return False
@@ -229,7 +310,7 @@ class Git(FetchMethod):
     def try_premirror(self, ud, d):
         # If we don't do this, updating an existing checkout with only premirrors
         # is not possible
-        if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
+        if d.getVar("BB_FETCH_PREMIRRORONLY") is not None:
             return True
         if os.path.exists(ud.clonedir):
             return False
@@ -238,10 +319,18 @@ class Git(FetchMethod):
     def download(self, ud, d):
         """Fetch url"""
 
-        # If the checkout doesn't exist and the mirror tarball does, extract it
-        if not os.path.exists(ud.clonedir) and os.path.exists(ud.fullmirror):
+        no_clone = not os.path.exists(ud.clonedir)
+        need_update = no_clone or self.need_update(ud, d)
+
+        # A current clone is preferred to either tarball, a shallow tarball is
+        # preferred to an out of date clone, and a missing clone will use
+        # either tarball.
+        if ud.shallow and os.path.exists(ud.fullshallow) and need_update:
+            ud.localpath = ud.fullshallow
+            return
+        elif os.path.exists(ud.fullmirror) and no_clone:
             bb.utils.mkdirhier(ud.clonedir)
-            runfetchcmd("tar -xzf %s" % (ud.fullmirror), d, workdir=ud.clonedir)
+            runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
 
         repourl = self._get_repo_url(ud)
 
@@ -252,7 +341,7 @@ class Git(FetchMethod):
                 repourl = repourl[7:]
             clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, repourl, ud.clonedir)
             if ud.proto.lower() != 'file':
-                bb.fetch2.check_network_access(d, clone_cmd)
+                bb.fetch2.check_network_access(d, clone_cmd, ud.url)
             progresshandler = GitProgressHandler(d)
             runfetchcmd(clone_cmd, d, log=progresshandler)
 
@@ -274,6 +363,7 @@ class Git(FetchMethod):
             progresshandler = GitProgressHandler(d)
             runfetchcmd(fetch_cmd, d, log=progresshandler, workdir=ud.clonedir)
             runfetchcmd("%s prune-packed" % ud.basecmd, d, workdir=ud.clonedir)
+            runfetchcmd("%s pack-refs --all" % ud.basecmd, d, workdir=ud.clonedir)
             runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d, workdir=ud.clonedir)
             try:
                 os.unlink(ud.fullmirror)
@@ -285,22 +375,90 @@ class Git(FetchMethod):
                 raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))
 
     def build_mirror_data(self, ud, d):
-        # Generate a mirror tarball if needed
-        if ud.write_tarballs and not os.path.exists(ud.fullmirror):
-            # it's possible that this symlink points to read-only filesystem with PREMIRROR
+        if ud.shallow and ud.write_shallow_tarballs:
+            if not os.path.exists(ud.fullshallow):
+                if os.path.islink(ud.fullshallow):
+                    os.unlink(ud.fullshallow)
+                tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
+                shallowclone = os.path.join(tempdir, 'git')
+                try:
+                    self.clone_shallow_local(ud, shallowclone, d)
+
+                    logger.info("Creating tarball of git repository")
+                    runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone)
+                    runfetchcmd("touch %s.done" % ud.fullshallow, d)
+                finally:
+                    bb.utils.remove(tempdir, recurse=True)
+        elif ud.write_tarballs and not os.path.exists(ud.fullmirror):
             if os.path.islink(ud.fullmirror):
                 os.unlink(ud.fullmirror)
 
             logger.info("Creating tarball of git repository")
-            runfetchcmd("tar -czf %s %s" % (ud.fullmirror, os.path.join(".") ), d, workdir=ud.clonedir)
-            runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=ud.clonedir)
+            runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir)
+            runfetchcmd("touch %s.done" % ud.fullmirror, d)
+
+    def clone_shallow_local(self, ud, dest, d):
+        """Clone the repo and make it shallow.
+
+        The upstream url of the new clone isn't set at this time, as it'll be
+        set correctly when unpacked."""
+        runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)
+
+        to_parse, shallow_branches = [], []
+        for name in ud.names:
+            revision = ud.revisions[name]
+            depth = ud.shallow_depths[name]
+            if depth:
+                to_parse.append('%s~%d^{}' % (revision, depth - 1))
+
+            # For nobranch, we need a ref, otherwise the commits will be
+            # removed, and for non-nobranch, we truncate the branch to our
+            # srcrev, to avoid keeping unnecessary history beyond that.
+            branch = ud.branches[name]
+            if ud.nobranch:
+                ref = "refs/shallow/%s" % name
+            elif ud.bareclone:
+                ref = "refs/heads/%s" % branch
+            else:
+                ref = "refs/remotes/origin/%s" % branch
+
+            shallow_branches.append(ref)
+            runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
+
+        # Map srcrev+depths to revisions
+        parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)
+
+        # Resolve specified revisions
+        parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
+        shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()
+
+        # Apply extra ref wildcards
+        all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
+                               d, workdir=dest).splitlines()
+        for r in ud.shallow_extra_refs:
+            if not ud.bareclone:
+                r = r.replace('refs/heads/', 'refs/remotes/origin/')
+
+            if '*' in r:
+                matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
+                shallow_branches.extend(matches)
+            else:
+                shallow_branches.append(r)
+
+        # Make the repository shallow
+        shallow_cmd = ['git', 'make-shallow', '-s']
+        for b in shallow_branches:
+            shallow_cmd.append('-r')
+            shallow_cmd.append(b)
+        shallow_cmd.extend(shallow_revisions)
+        runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)
 
     def unpack(self, ud, destdir, d):
         """ unpack the downloaded src to destdir"""
 
         subdir = ud.parm.get("subpath", "")
         if subdir != "":
-            readpathspec = ":%s" % (subdir)
+            readpathspec = ":%s" % subdir
             def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/'))
         else:
             readpathspec = ""
@@ -311,11 +469,12 @@ class Git(FetchMethod):
         if os.path.exists(destdir):
             bb.utils.prunedir(destdir)
 
-        cloneflags = "-s -n"
-        if ud.bareclone:
-            cloneflags += " --mirror"
+        if ud.shallow and (not os.path.exists(ud.clonedir) or self.need_update(ud, d)):
+            bb.utils.mkdirhier(destdir)
+            runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir)
+        else:
+            runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
 
-        runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, cloneflags, ud.clonedir, destdir), d)
         repourl = self._get_repo_url(ud)
         runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir)
         if not ud.nocheckout:
@@ -327,7 +486,7 @@ class Git(FetchMethod):
                 branchname =  ud.branches[ud.names[0]]
                 runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
                             ud.revisions[ud.names[0]]), d, workdir=destdir)
-                runfetchcmd("%s branch --set-upstream %s origin/%s" % (ud.basecmd, branchname, \
+                runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
                             branchname), d, workdir=destdir)
             else:
                 runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=destdir)
@@ -380,14 +539,26 @@ class Git(FetchMethod):
         """
         Run git ls-remote with the specified search string
         """
-        repourl = self._get_repo_url(ud)
-        cmd = "%s ls-remote %s %s" % \
-              (ud.basecmd, repourl, search)
-        if ud.proto.lower() != 'file':
-            bb.fetch2.check_network_access(d, cmd)
-        output = runfetchcmd(cmd, d, True)
-        if not output:
-            raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
+        # Prevent recursion e.g. in OE if SRCPV is in PV, PV is in WORKDIR,
+        # and WORKDIR is in PATH (as a result of RSS), our call to
+        # runfetchcmd() exports PATH so this function will get called again (!)
+        # In this scenario the return call of the function isn't actually
+        # important - WORKDIR isn't needed in PATH to call git ls-remote
+        # anyway.
+        if d.getVar('_BB_GIT_IN_LSREMOTE', False):
+            return ''
+        d.setVar('_BB_GIT_IN_LSREMOTE', '1')
+        try:
+            repourl = self._get_repo_url(ud)
+            cmd = "%s ls-remote %s %s" % \
+                (ud.basecmd, repourl, search)
+            if ud.proto.lower() != 'file':
+                bb.fetch2.check_network_access(d, cmd, repourl)
+            output = runfetchcmd(cmd, d, True)
+            if not output:
+                raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
+        finally:
+            d.delVar('_BB_GIT_IN_LSREMOTE')
         return output
 
     def _latest_revision(self, ud, d, name):
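The _BB_GIT_IN_LSREMOTE flag is a generic re-entrancy guard; the same shape in isolation (names shortened for illustration):

def guarded(d, key, func, default=''):
    # A datastore flag breaks the cycle if func() indirectly re-enters
    # this code path, as ls-remote can via runfetchcmd's environment.
    if d.getVar(key, False):
        return default
    d.setVar(key, '1')
    try:
        return func()
    finally:
        d.delVar(key)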
@@ -418,7 +589,7 @@ class Git(FetchMethod):
         """
         pupver = ('', '')
 
-        tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX', True) or "(?P<pver>([0-9][\.|_]?)+)")
+        tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or "(?P<pver>([0-9][\.|_]?)+)")
         try:
             output = self._lsremote(ud, d, "refs/tags/*")
         except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess):
@@ -470,7 +641,7 @@ class Git(FetchMethod):
             if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
                 from pipes import quote
                 commits = bb.fetch2.runfetchcmd(
-                        "git rev-list %s -- | wc -l" % (quote(rev)),
+                        "git rev-list %s -- | wc -l" % quote(rev),
                         d, quiet=True).strip().lstrip('0')
                 if commits:
                     open(rev_file, "w").write("%d\n" % int(commits))
@@ -485,5 +656,5 @@ class Git(FetchMethod):
         try:
             self._lsremote(ud, d, "")
             return True
-        except FetchError:
+        except bb.fetch2.FetchError:
             return False

+ 20 - 4
bitbake/lib/bb/fetch2/gitannex.py

@@ -22,7 +22,6 @@ BitBake 'Fetch' git annex implementation
 
 import os
 import bb
-from   bb import data
 from   bb.fetch2.git import Git
 from   bb.fetch2 import runfetchcmd
 from   bb.fetch2 import logger
@@ -34,6 +33,11 @@ class GitANNEX(Git):
         """
         return ud.type in ['gitannex']
 
+    def urldata_init(self, ud, d):
+        super(GitANNEX, self).urldata_init(ud, d)
+        if ud.shallow:
+            ud.shallow_extra_refs += ['refs/heads/git-annex', 'refs/heads/synced/*']
+
     def uses_annex(self, ud, d, wd):
         for name in ud.names:
             try:
@@ -56,9 +60,21 @@ class GitANNEX(Git):
     def download(self, ud, d):
         Git.download(self, ud, d)
 
-        annex = self.uses_annex(ud, d, ud.clonedir)
-        if annex:
-            self.update_annex(ud, d, ud.clonedir)
+        if not ud.shallow or ud.localpath != ud.fullshallow:
+            if self.uses_annex(ud, d, ud.clonedir):
+                self.update_annex(ud, d, ud.clonedir)
+
+    def clone_shallow_local(self, ud, dest, d):
+        super(GitANNEX, self).clone_shallow_local(ud, dest, d)
+
+        try:
+            runfetchcmd("%s annex init" % ud.basecmd, d, workdir=dest)
+        except bb.fetch2.FetchError:
+            pass
+
+        if self.uses_annex(ud, d, dest):
+            runfetchcmd("%s annex get" % ud.basecmd, d, workdir=dest)
+            runfetchcmd("chmod u+w -R %s/.git/annex" % (dest), d, quiet=True, workdir=dest)
 
     def unpack(self, ud, destdir, d):
         Git.unpack(self, ud, destdir, d)

+ 12 - 8
bitbake/lib/bb/fetch2/gitsm.py

@@ -31,7 +31,6 @@ NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your r
 
 import os
 import bb
-from   bb    import data
 from   bb.fetch2.git import Git
 from   bb.fetch2 import runfetchcmd
 from   bb.fetch2 import logger
@@ -108,7 +107,7 @@ class GitSM(Git):
         os.rename(ud.clonedir, gitdir)
         runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
         runfetchcmd(ud.basecmd + " reset --hard", d, workdir=tmpclonedir)
-        runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=tmpclonedir)
+        runfetchcmd(ud.basecmd + " checkout -f " + ud.revisions[ud.names[0]], d, workdir=tmpclonedir)
         runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=tmpclonedir)
         self._set_relative_paths(tmpclonedir)
         runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d, workdir=tmpclonedir)
@@ -118,14 +117,19 @@ class GitSM(Git):
     def download(self, ud, d):
         Git.download(self, ud, d)
 
-        submodules = self.uses_submodules(ud, d, ud.clonedir)
-        if submodules:
-            self.update_submodules(ud, d)
+        if not ud.shallow or ud.localpath != ud.fullshallow:
+            submodules = self.uses_submodules(ud, d, ud.clonedir)
+            if submodules:
+                self.update_submodules(ud, d)
+
+    def clone_shallow_local(self, ud, dest, d):
+        super(GitSM, self).clone_shallow_local(ud, dest, d)
+
+        runfetchcmd('cp -fpPRH "%s/modules" "%s/"' % (ud.clonedir, os.path.join(dest, '.git')), d)
 
     def unpack(self, ud, destdir, d):
         Git.unpack(self, ud, destdir, d)
-        
-        submodules = self.uses_submodules(ud, d, ud.destdir)
-        if submodules:
+
+        if self.uses_submodules(ud, d, ud.destdir):
             runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=ud.destdir)
             runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=ud.destdir)

+ 9 - 9
bitbake/lib/bb/fetch2/hg.py

@@ -29,7 +29,6 @@ import sys
 import logging
 import bb
 import errno
-from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import MissingParameterError
@@ -67,7 +66,7 @@ class Hg(FetchMethod):
         else:
             ud.proto = "hg"
 
-        ud.setup_revisons(d)
+        ud.setup_revisions(d)
 
         if 'rev' in ud.parm:
             ud.revision = ud.parm['rev']
@@ -77,16 +76,17 @@ class Hg(FetchMethod):
         # Create paths to mercurial checkouts
         hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \
                             ud.host, ud.path.replace('/', '.'))
-        ud.mirrortarball = 'hg_%s.tar.gz' % hgsrcname
-        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)
+        mirrortarball = 'hg_%s.tar.gz' % hgsrcname
+        ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
+        ud.mirrortarballs = [mirrortarball]
 
-        hgdir = d.getVar("HGDIR", True) or (d.getVar("DL_DIR", True) + "/hg/")
+        hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg/")
         ud.pkgdir = os.path.join(hgdir, hgsrcname)
         ud.moddir = os.path.join(ud.pkgdir, ud.module)
         ud.localfile = ud.moddir
-        ud.basecmd = data.getVar("FETCHCMD_hg", d, True) or "/usr/bin/env hg"
+        ud.basecmd = d.getVar("FETCHCMD_hg") or "/usr/bin/env hg"
 
-        ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS", True)
+        ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS")
 
     def need_update(self, ud, d):
         revTag = ud.parm.get('rev', 'tip')
@@ -99,7 +99,7 @@ class Hg(FetchMethod):
     def try_premirror(self, ud, d):
         # If we don't do this, updating an existing checkout with only premirrors
         # is not possible
-        if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
+        if d.getVar("BB_FETCH_PREMIRRORONLY") is not None:
             return True
         if os.path.exists(ud.moddir):
             return False
@@ -221,7 +221,7 @@ class Hg(FetchMethod):
         """
         Compute tip revision for the url
         """
-        bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"))
+        bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"), ud.url)
         output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d)
         return output.strip()
 

+ 4 - 14
bitbake/lib/bb/fetch2/local.py

@@ -29,7 +29,6 @@ import os
 import urllib.request, urllib.parse, urllib.error
 import bb
 import bb.utils
-from   bb import data
 from   bb.fetch2 import FetchMethod, FetchError
 from   bb.fetch2 import logger
 
@@ -63,17 +62,11 @@ class Local(FetchMethod):
         newpath = path
         if path[0] == "/":
             return [path]
-        filespath = data.getVar('FILESPATH', d, True)
+        filespath = d.getVar('FILESPATH')
         if filespath:
             logger.debug(2, "Searching for %s in paths:\n    %s" % (path, "\n    ".join(filespath.split(":"))))
             newpath, hist = bb.utils.which(filespath, path, history=True)
             searched.extend(hist)
-        if not newpath:
-            filesdir = data.getVar('FILESDIR', d, True)
-            if filesdir:
-                logger.debug(2, "Searching for %s in path: %s" % (path, filesdir))
-                newpath = os.path.join(filesdir, path)
-                searched.append(newpath)
         if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1:
             # For expressions using '*', best we can do is take the first directory in FILESPATH that exists
             newpath, hist = bb.utils.which(filespath, ".", history=True)
@@ -81,7 +74,7 @@ class Local(FetchMethod):
             logger.debug(2, "Searching for %s in path: %s" % (path, newpath))
             return searched
         if not os.path.exists(newpath):
-            dldirfile = os.path.join(d.getVar("DL_DIR", True), path)
+            dldirfile = os.path.join(d.getVar("DL_DIR"), path)
             logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
             bb.utils.mkdirhier(os.path.dirname(dldirfile))
             searched.append(dldirfile)
@@ -100,13 +93,10 @@ class Local(FetchMethod):
         # no need to fetch local files, we'll deal with them in place.
         if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath):
             locations = []
-            filespath = data.getVar('FILESPATH', d, True)
+            filespath = d.getVar('FILESPATH')
             if filespath:
                 locations = filespath.split(":")
-            filesdir = data.getVar('FILESDIR', d, True)
-            if filesdir:
-                locations.append(filesdir)
-            locations.append(d.getVar("DL_DIR", True))
+            locations.append(d.getVar("DL_DIR"))
 
             msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n    " + "\n    ".join(locations)
             raise FetchError(msg)

+ 42 - 28
bitbake/lib/bb/fetch2/npm.py

@@ -25,7 +25,6 @@ import json
 import subprocess
 import signal
 import bb
-from   bb import data
 from   bb.fetch2 import FetchMethod
 from   bb.fetch2 import FetchError
 from   bb.fetch2 import ChecksumError
@@ -80,6 +79,7 @@ class Npm(FetchMethod):
         if not ud.version:
             raise ParameterError("NPM fetcher requires a version parameter", ud.url)
         ud.bbnpmmanifest = "%s-%s.deps.json" % (ud.pkgname, ud.version)
+        ud.bbnpmmanifest = ud.bbnpmmanifest.replace('/', '-')
         ud.registry = "http://%s" % (ud.url.replace('npm://', '', 1).split(';'))[0]
         prefixdir = "npm/%s" % ud.pkgname
         ud.pkgdatadir = d.expand("${DL_DIR}/%s" % prefixdir)
@@ -87,12 +87,14 @@ class Npm(FetchMethod):
             bb.utils.mkdirhier(ud.pkgdatadir)
         ud.localpath = d.expand("${DL_DIR}/npm/%s" % ud.bbnpmmanifest)
 
-        self.basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate "
+        self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate "
         ud.prefixdir = prefixdir
 
-        ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0")
-        ud.mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version)
-        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)
+        ud.write_tarballs = ((d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0") != "0")
+        mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version)
+        mirrortarball = mirrortarball.replace('/', '-')
+        ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
+        ud.mirrortarballs = [mirrortarball]
 
     def need_update(self, ud, d):
         if os.path.exists(ud.localpath):
@@ -101,8 +103,8 @@ class Npm(FetchMethod):
 
     def _runwget(self, ud, d, command, quiet):
         logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
-        bb.fetch2.check_network_access(d, command)
-        dldir = d.getVar("DL_DIR", True)
+        bb.fetch2.check_network_access(d, command, ud.url)
+        dldir = d.getVar("DL_DIR")
         runfetchcmd(command, d, quiet, workdir=dldir)
 
     def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
@@ -116,7 +118,7 @@ class Npm(FetchMethod):
         # Change to subdir before executing command
         if not os.path.exists(destdir):
             os.makedirs(destdir)
-        path = d.getVar('PATH', True)
+        path = d.getVar('PATH')
         if path:
             cmd = "PATH=\"%s\" %s" % (path, cmd)
         bb.note("Unpacking %s to %s/" % (file, destdir))
@@ -132,9 +134,8 @@ class Npm(FetchMethod):
 
 
     def unpack(self, ud, destdir, d):
-        dldir = d.getVar("DL_DIR", True)
-        depdumpfile = "%s-%s.deps.json" % (ud.pkgname, ud.version)
-        with open("%s/npm/%s" % (dldir, depdumpfile)) as datafile:
+        dldir = d.getVar("DL_DIR")
+        with open("%s/npm/%s" % (dldir, ud.bbnpmmanifest)) as datafile:
             workobj = json.load(datafile)
         dldir = "%s/%s" % (os.path.dirname(ud.localpath), ud.pkgname)
 
@@ -182,7 +183,12 @@ class Npm(FetchMethod):
             if pkg_os:
                 if not isinstance(pkg_os, list):
                     pkg_os = [pkg_os]
-                if 'linux' not in pkg_os or '!linux' in pkg_os:
+                blacklist = False
+                for item in pkg_os:
+                    if item.startswith('!'):
+                        blacklist = True
+                        break
+                if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
                     logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
                     return
         #logger.debug(2, "Output URL is %s - %s - %s" % (ud.basepath, ud.basename, ud.localfile))
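The blacklist detection above makes the package.json "os" field behave as a whitelist only when no entry is negated. Extracted into a self-contained predicate:

def os_compatible(pkg_os):
    # "os" may be e.g. ["linux"] (whitelist) or ["!win32"] (blacklist).
    if not isinstance(pkg_os, list):
        pkg_os = [pkg_os]
    blacklist = any(item.startswith('!') for item in pkg_os)
    return not ((not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os)

assert os_compatible(['!win32'])       # blacklist that spares linux
assert not os_compatible(['darwin'])   # whitelist without linux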
@@ -195,6 +201,7 @@ class Npm(FetchMethod):
 
         dependencies = pdata.get('dependencies', {})
         optionalDependencies = pdata.get('optionalDependencies', {})
+        dependencies.update(optionalDependencies)
         depsfound = {}
         optdepsfound = {}
         data[pkg]['deps'] = {}
@@ -251,25 +258,32 @@ class Npm(FetchMethod):
         lockdown = {}
 
         if not os.listdir(ud.pkgdatadir) and os.path.exists(ud.fullmirror):
-            dest = d.getVar("DL_DIR", True)
+            dest = d.getVar("DL_DIR")
             bb.utils.mkdirhier(dest)
             runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest)
             return
 
-        shwrf = d.getVar('NPM_SHRINKWRAP', True)
-        logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
-        try:
-            with open(shwrf) as datafile:
-                shrinkobj = json.load(datafile)
-        except:
-            logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
-        lckdf = d.getVar('NPM_LOCKDOWN', True)
-        logger.debug(2, "NPM lockdown file is %s" % lckdf)
-        try:
-            with open(lckdf) as datafile:
-                lockdown = json.load(datafile)
-        except:
-            logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)
+        if ud.parm.get("noverify", None) != '1':
+            shwrf = d.getVar('NPM_SHRINKWRAP')
+            logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
+            if shwrf:
+                try:
+                    with open(shwrf) as datafile:
+                        shrinkobj = json.load(datafile)
+                except Exception as e:
+                    raise FetchError('Error loading NPM_SHRINKWRAP file "%s" for %s: %s' % (shwrf, ud.pkgname, str(e)))
+            elif not ud.ignore_checksums:
+                logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
+            lckdf = d.getVar('NPM_LOCKDOWN')
+            logger.debug(2, "NPM lockdown file is %s" % lckdf)
+            if lckdf:
+                try:
+                    with open(lckdf) as datafile:
+                        lockdown = json.load(datafile)
+                except Exception as e:
+                    raise FetchError('Error loading NPM_LOCKDOWN file "%s" for %s: %s' % (lckdf, ud.pkgname, str(e)))
+            elif not ud.ignore_checksums:
+                logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)
 
         if ('name' not in shrinkobj):
             self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud)
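The shrinkwrap/lockdown handling is now strict about configured files and lenient about unset variables. A condensed sketch of that split, with a local FetchError stand-in:

import json

class FetchError(Exception):   # stands in for bb.fetch2's exception
    pass

def load_npm_manifest(path, varname, pkgname, warn):
    # A configured-but-unreadable file is a hard error; an unset
    # variable only warns, matching the hunks above.
    if not path:
        warn('Missing %s for %s, this will lead to unreliable builds!'
             % (varname, pkgname))
        return {}
    try:
        with open(path) as f:
            return json.load(f)
    except Exception as e:
        raise FetchError('Error loading %s file "%s" for %s: %s'
                         % (varname, path, pkgname, str(e)))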
@@ -286,7 +300,7 @@ class Npm(FetchMethod):
             if os.path.islink(ud.fullmirror):
                 os.unlink(ud.fullmirror)
 
-            dldir = d.getVar("DL_DIR", True)
+            dldir = d.getVar("DL_DIR")
             logger.info("Creating tarball of npm data")
             runfetchcmd("tar -cJf %s npm/%s npm/%s" % (ud.fullmirror, ud.bbnpmmanifest, ud.pkgname), d,
                         workdir=dldir)

+ 7 - 8
bitbake/lib/bb/fetch2/osc.py

@@ -10,7 +10,6 @@ import  os
 import  sys
 import logging
 import  bb
-from    bb       import data
 from    bb.fetch2 import FetchMethod
 from    bb.fetch2 import FetchError
 from    bb.fetch2 import MissingParameterError
@@ -34,7 +33,7 @@ class Osc(FetchMethod):
 
         # Create paths to osc checkouts
         relpath = self._strip_leading_slashes(ud.path)
-        ud.pkgdir = os.path.join(d.getVar('OSCDIR', True), ud.host)
+        ud.pkgdir = os.path.join(d.getVar('OSCDIR'), ud.host)
         ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
 
         if 'rev' in ud.parm:
@@ -47,7 +46,7 @@ class Osc(FetchMethod):
             else:
                 ud.revision = ""
 
-        ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)
+        ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision))
 
     def _buildosccommand(self, ud, d, command):
         """
@@ -55,7 +54,7 @@ class Osc(FetchMethod):
         command is "fetch", "update", "info"
         """
 
-        basecmd = data.expand('${FETCHCMD_osc}', d)
+        basecmd = d.expand('${FETCHCMD_osc}')
 
         proto = ud.parm.get('protocol', 'ocs')
 
@@ -84,7 +83,7 @@ class Osc(FetchMethod):
 
         logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
 
-        if os.access(os.path.join(d.getVar('OSCDIR', True), ud.path, ud.module), os.R_OK):
+        if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
             oscupdatecmd = self._buildosccommand(ud, d, "update")
             logger.info("Update "+ ud.url)
             # update sources there
@@ -112,7 +111,7 @@ class Osc(FetchMethod):
         Generate a .oscrc to be used for this run.
         """
 
-        config_path = os.path.join(d.getVar('OSCDIR', True), "oscrc")
+        config_path = os.path.join(d.getVar('OSCDIR'), "oscrc")
         if (os.path.exists(config_path)):
             os.remove(config_path)
 
@@ -121,8 +120,8 @@ class Osc(FetchMethod):
         f.write("apisrv = %s\n" % ud.host)
         f.write("scheme = http\n")
         f.write("su-wrapper = su -c\n")
-        f.write("build-root = %s\n" % d.getVar('WORKDIR', True))
-        f.write("urllist = %s\n" % d.getVar("OSCURLLIST", True))
+        f.write("build-root = %s\n" % d.getVar('WORKDIR'))
+        f.write("urllist = %s\n" % d.getVar("OSCURLLIST"))
         f.write("extra-pkgs = gzip\n")
         f.write("\n")
         f.write("[%s]\n" % ud.host)

+ 10 - 11
bitbake/lib/bb/fetch2/perforce.py

@@ -26,7 +26,6 @@ BitBake 'Fetch' implementation for perforce
 import os
 import logging
 import bb
-from   bb import data
 from   bb.fetch2 import FetchMethod
 from   bb.fetch2 import FetchError
 from   bb.fetch2 import logger
@@ -44,13 +43,13 @@ class Perforce(FetchMethod):
         provided by the env, use it.  If P4PORT is specified by the recipe, use
         its values, which may override the settings in P4CONFIG.
         """
-        ud.basecmd = d.getVar('FETCHCMD_p4', True)
+        ud.basecmd = d.getVar('FETCHCMD_p4')
         if not ud.basecmd:
             ud.basecmd = "/usr/bin/env p4"
 
-        ud.dldir = d.getVar('P4DIR', True)
+        ud.dldir = d.getVar('P4DIR')
         if not ud.dldir:
-            ud.dldir = '%s/%s' % (d.getVar('DL_DIR', True), 'p4')
+            ud.dldir = '%s/%s' % (d.getVar('DL_DIR'), 'p4')
 
         path = ud.url.split('://')[1]
         path = path.split(';')[0]
@@ -62,7 +61,7 @@ class Perforce(FetchMethod):
             ud.path = path
 
         ud.usingp4config = False
-        p4port = d.getVar('P4PORT', True)
+        p4port = d.getVar('P4PORT')
 
         if p4port:
             logger.debug(1, 'Using recipe provided P4PORT: %s' % p4port)
@@ -71,7 +70,7 @@ class Perforce(FetchMethod):
             logger.debug(1, 'Trying to use P4CONFIG to automatically set P4PORT...')
             ud.usingp4config = True
             p4cmd = '%s info | grep "Server address"' % ud.basecmd
-            bb.fetch2.check_network_access(d, p4cmd)
+            bb.fetch2.check_network_access(d, p4cmd, ud.url)
             ud.host = runfetchcmd(p4cmd, d, True)
             ud.host = ud.host.split(': ')[1].strip()
             logger.debug(1, 'Determined P4PORT to be: %s' % ud.host)
@@ -87,9 +86,9 @@ class Perforce(FetchMethod):
         cleanedhost = ud.host.replace(':', '.')
         ud.pkgdir = os.path.join(ud.dldir, cleanedhost, cleanedpath)
 
-        ud.setup_revisons(d)
+        ud.setup_revisions(d)
 
-        ud.localfile = data.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision), d)
+        ud.localfile = d.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision))
 
     def _buildp4command(self, ud, d, command, depot_filename=None):
         """
@@ -140,7 +139,7 @@ class Perforce(FetchMethod):
         'p4 files' command, including trailing '#rev' file revision indicator
         """
         p4cmd = self._buildp4command(ud, d, 'files')
-        bb.fetch2.check_network_access(d, p4cmd)
+        bb.fetch2.check_network_access(d, p4cmd, ud.url)
         p4fileslist = runfetchcmd(p4cmd, d, True)
         p4fileslist = [f.rstrip() for f in p4fileslist.splitlines()]
 
@@ -171,7 +170,7 @@ class Perforce(FetchMethod):
 
         for afile in filelist:
             p4fetchcmd = self._buildp4command(ud, d, 'print', afile)
-            bb.fetch2.check_network_access(d, p4fetchcmd)
+            bb.fetch2.check_network_access(d, p4fetchcmd, ud.url)
             runfetchcmd(p4fetchcmd, d, workdir=ud.pkgdir)
 
         runfetchcmd('tar -czf %s p4' % (ud.localpath), d, cleanup=[ud.localpath], workdir=ud.pkgdir)
@@ -191,7 +190,7 @@ class Perforce(FetchMethod):
     def _latest_revision(self, ud, d, name):
         """ Return the latest upstream scm revision number """
         p4cmd = self._buildp4command(ud, d, "changes")
-        bb.fetch2.check_network_access(d, p4cmd)
+        bb.fetch2.check_network_access(d, p4cmd, ud.url)
         tip = runfetchcmd(p4cmd, d, True)
 
         if not tip:
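
Two things change in the perforce fetcher: setup_revisons() is renamed to the correctly spelled setup_revisions(), and every check_network_access() call now passes ud.url so network-access errors can name the offending SRC_URI. The P4PORT/P4CONFIG precedence described in the docstring boils down to the following (condensed from the hunk above, error handling elided):

    p4port = d.getVar('P4PORT')
    if p4port:
        # The recipe named a server explicitly; it wins.
        ud.host = p4port
    else:
        # Fall back to the p4 client, which honours P4CONFIG from the
        # environment, and parse the server out of "p4 info".
        ud.usingp4config = True
        out = runfetchcmd('%s info | grep "Server address"' % ud.basecmd, d, True)
        ud.host = out.split(': ')[1].strip()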

+ 4 - 4
bitbake/lib/bb/fetch2/repo.py

@@ -25,9 +25,9 @@ BitBake "Fetch" repo (git) implementation
 
 import os
 import bb
-from   bb    import data
 from   bb.fetch2 import FetchMethod
 from   bb.fetch2 import runfetchcmd
+from   bb.fetch2 import logger
 
 class Repo(FetchMethod):
     """Class to fetch a module or modules from repo (git) repositories"""
@@ -51,17 +51,17 @@ class Repo(FetchMethod):
         if not ud.manifest.endswith('.xml'):
             ud.manifest += '.xml'
 
-        ud.localfile = data.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch), d)
+        ud.localfile = d.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch))
 
     def download(self, ud, d):
         """Fetch url"""
 
-        if os.access(os.path.join(data.getVar("DL_DIR", d, True), ud.localfile), os.R_OK):
+        if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK):
             logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
             return
 
         gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", "."))
-        repodir = data.getVar("REPODIR", d, True) or os.path.join(data.getVar("DL_DIR", d, True), "repo")
+        repodir = d.getVar("REPODIR") or os.path.join(d.getVar("DL_DIR"), "repo")
         codir = os.path.join(repodir, gitsrcname, ud.manifest)
 
         if ud.user:

+ 98 - 0
bitbake/lib/bb/fetch2/s3.py

@@ -0,0 +1,98 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+"""
+BitBake 'Fetch' implementation for Amazon AWS S3.
+
+Class for fetching files from Amazon S3 using the AWS Command Line Interface.
+The aws tool must be correctly installed and configured prior to use.
+
+"""
+
+# Copyright (C) 2017, Andre McCurdy <armccurdy@gmail.com>
+#
+# Based in part on bb.fetch2.wget:
+#    Copyright (C) 2003, 2004  Chris Larson
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig
+
+import os
+import bb
+import urllib.request, urllib.parse, urllib.error
+from bb.fetch2 import FetchMethod
+from bb.fetch2 import FetchError
+from bb.fetch2 import runfetchcmd
+
+class S3(FetchMethod):
+    """Class to fetch urls via 'aws s3'"""
+
+    def supports(self, ud, d):
+        """
+        Check to see if a given url can be fetched with s3.
+        """
+        return ud.type in ['s3']
+
+    def recommends_checksum(self, urldata):
+        return True
+
+    def urldata_init(self, ud, d):
+        if 'downloadfilename' in ud.parm:
+            ud.basename = ud.parm['downloadfilename']
+        else:
+            ud.basename = os.path.basename(ud.path)
+
+        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
+
+        ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3"
+
+    def download(self, ud, d):
+        """
+        Fetch urls
+        Assumes localpath was called first
+        """
+
+        cmd = '%s cp s3://%s%s %s' % (ud.basecmd, ud.host, ud.path, ud.localpath)
+        bb.fetch2.check_network_access(d, cmd, ud.url)
+        runfetchcmd(cmd, d)
+
+        # Additional sanity checks copied from the wget class (although there
+        # are no known issues which mean these are required, treat the aws cli
+        # tool with a little healthy suspicion).
+
+        if not os.path.exists(ud.localpath):
+            raise FetchError("The aws cp command returned success for s3://%s%s but %s doesn't exist?!" % (ud.host, ud.path, ud.localpath))
+
+        if os.path.getsize(ud.localpath) == 0:
+            os.remove(ud.localpath)
+            raise FetchError("The aws cp command for s3://%s%s resulted in a zero size file?! Deleting and failing since this isn't right." % (ud.host, ud.path))
+
+        return True
+
+    def checkstatus(self, fetch, ud, d):
+        """
+        Check the status of a URL
+        """
+
+        cmd = '%s ls s3://%s%s' % (ud.basecmd, ud.host, ud.path)
+        bb.fetch2.check_network_access(d, cmd, ud.url)
+        output = runfetchcmd(cmd, d)
+
+        # "aws s3 ls s3://mybucket/foo" will exit with success even if the file
+        # is not found, so check output of the command to confirm success.
+
+        if not output:
+            raise FetchError("The aws ls command for s3://%s%s gave empty output" % (ud.host, ud.path))
+
+        return True
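
The new fetcher is a thin wrapper over the AWS CLI. For a hypothetical object the two entry points reduce to shell commands (bucket and file names are illustrative):

    # The command prefix can be overridden via FETCHCMD_s3; the default is:
    basecmd = "/usr/bin/env aws s3"
    # download() then runs, via runfetchcmd:
    #     aws s3 cp s3://mybucket/snapshots/foo-1.0.tar.gz <DL_DIR>/foo-1.0.tar.gz
    # checkstatus() probes for existence; "aws s3 ls" exits 0 even for a
    # missing key, hence the empty-output check in the code above:
    #     aws s3 ls s3://mybucket/snapshots/foo-1.0.tar.gz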

+ 2 - 4
bitbake/lib/bb/fetch2/sftp.py

@@ -62,12 +62,10 @@ SRC_URI = "sftp://user@host.example.com/dir/path.file.txt"
 import os
 import bb
 import urllib.request, urllib.parse, urllib.error
-from bb import data
 from bb.fetch2 import URI
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import runfetchcmd
 
-
 class SFTP(FetchMethod):
     """Class to fetch urls via 'sftp'"""
 
@@ -92,7 +90,7 @@ class SFTP(FetchMethod):
         else:
             ud.basename = os.path.basename(ud.path)
 
-        ud.localfile = data.expand(urllib.parse.unquote(ud.basename), d)
+        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
 
     def download(self, ud, d):
         """Fetch urls"""
@@ -104,7 +102,7 @@ class SFTP(FetchMethod):
             port = '-P %d' % urlo.port
             urlo.port = None
 
-        dldir = data.getVar('DL_DIR', d, True)
+        dldir = d.getVar('DL_DIR')
         lpath = os.path.join(dldir, ud.localfile)
 
         user = ''

+ 2 - 3
bitbake/lib/bb/fetch2/ssh.py

@@ -43,7 +43,6 @@ IETF secsh internet draft:
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
 import re, os
-from   bb import data
 from   bb.fetch2 import FetchMethod
 from   bb.fetch2 import FetchError
 from   bb.fetch2 import logger
@@ -87,11 +86,11 @@ class SSH(FetchMethod):
         m = __pattern__.match(urldata.url)
         path = m.group('path')
         host = m.group('host')
-        urldata.localpath = os.path.join(d.getVar('DL_DIR', True),
+        urldata.localpath = os.path.join(d.getVar('DL_DIR'),
                 os.path.basename(os.path.normpath(path)))
 
     def download(self, urldata, d):
-        dldir = d.getVar('DL_DIR', True)
+        dldir = d.getVar('DL_DIR')
 
         m = __pattern__.match(urldata.url)
         path = m.group('path')

+ 10 - 11
bitbake/lib/bb/fetch2/svn.py

@@ -28,7 +28,6 @@ import sys
 import logging
 import bb
 import re
-from   bb import data
 from   bb.fetch2 import FetchMethod
 from   bb.fetch2 import FetchError
 from   bb.fetch2 import MissingParameterError
@@ -50,7 +49,7 @@ class Svn(FetchMethod):
         if not "module" in ud.parm:
             raise MissingParameterError('module', ud.url)
 
-        ud.basecmd = d.getVar('FETCHCMD_svn', True)
+        ud.basecmd = d.getVar('FETCHCMD_svn')
 
         ud.module = ud.parm["module"]
 
@@ -61,15 +60,15 @@ class Svn(FetchMethod):
 
         # Create paths to svn checkouts
         relpath = self._strip_leading_slashes(ud.path)
-        ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath)
+        ud.pkgdir = os.path.join(d.expand('${SVNDIR}'), ud.host, relpath)
         ud.moddir = os.path.join(ud.pkgdir, ud.module)
 
-        ud.setup_revisons(d)
+        ud.setup_revisions(d)
 
         if 'rev' in ud.parm:
             ud.revision = ud.parm['rev']
 
-        ud.localfile = data.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)
+        ud.localfile = d.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision))
 
     def _buildsvncommand(self, ud, d, command):
         """
@@ -79,9 +78,9 @@ class Svn(FetchMethod):
 
         proto = ud.parm.get('protocol', 'svn')
 
-        svn_rsh = None
-        if proto == "svn+ssh" and "rsh" in ud.parm:
-            svn_rsh = ud.parm["rsh"]
+        svn_ssh = None
+        if proto == "svn+ssh" and "ssh" in ud.parm:
+            svn_ssh = ud.parm["ssh"]
 
         svnroot = ud.host + ud.path
 
@@ -113,8 +112,8 @@ class Svn(FetchMethod):
             else:
                 raise FetchError("Invalid svn command %s" % command, ud.url)
 
-        if svn_rsh:
-            svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)
+        if svn_ssh:
+            svncmd = "SVN_SSH=\"%s\" %s" % (svn_ssh, svncmd)
 
         return svncmd
 
@@ -173,7 +172,7 @@ class Svn(FetchMethod):
         """
         Return the latest upstream revision number
         """
-        bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"))
+        bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"), ud.url)
 
         output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "log1"), d, True)
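
The svn+ssh handling above fixes a long-standing nit: the wrapper variable is now the SVN_SSH environment variable that Subversion actually consults (the old code exported a non-existent svn_RSH), and the URL parameter is renamed from "rsh" to "ssh" to match. A hypothetical SRC_URI and the command it would produce:

    # SRC_URI = "svn://svn.example.com/src;module=trunk;protocol=svn+ssh;ssh=ssh -l builder"
    # _buildsvncommand() prefixes the checkout with the tunnel wrapper:
    #     SVN_SSH="ssh -l builder" svn co ...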
 

+ 57 - 35
bitbake/lib/bb/fetch2/wget.py

@@ -30,10 +30,10 @@ import tempfile
 import subprocess
 import os
 import logging
+import errno
 import bb
 import bb.progress
 import urllib.request, urllib.parse, urllib.error
-from   bb import data
 from   bb.fetch2 import FetchMethod
 from   bb.fetch2 import FetchError
 from   bb.fetch2 import logger
@@ -84,19 +84,19 @@ class Wget(FetchMethod):
         else:
             ud.basename = os.path.basename(ud.path)
 
-        ud.localfile = data.expand(urllib.parse.unquote(ud.basename), d)
+        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
         if not ud.localfile:
-            ud.localfile = data.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."), d)
+            ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
 
-        self.basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"
+        self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"
 
-    def _runwget(self, ud, d, command, quiet):
+    def _runwget(self, ud, d, command, quiet, workdir=None):
 
         progresshandler = WgetProgressHandler(d)
 
         logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
-        bb.fetch2.check_network_access(d, command)
-        runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler)
+        bb.fetch2.check_network_access(d, command, ud.url)
+        runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
 
     def download(self, ud, d):
         """Fetch urls"""
@@ -104,13 +104,12 @@ class Wget(FetchMethod):
         fetchcmd = self.basecmd
 
         if 'downloadfilename' in ud.parm:
-            dldir = d.getVar("DL_DIR", True)
+            dldir = d.getVar("DL_DIR")
             bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile))
             fetchcmd += " -O " + dldir + os.sep + ud.localfile
 
-        if ud.user:
-            up = ud.user.split(":")
-            fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (up[0],up[1])
+        if ud.user and ud.pswd:
+            fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)
 
         uri = ud.url.split(";")[0]
         if os.path.exists(ud.localpath):
@@ -208,8 +207,21 @@ class Wget(FetchMethod):
                     h.request(req.get_method(), req.selector, req.data, headers)
                 except socket.error as err: # XXX what error?
                     # Don't close connection when cache is enabled.
+                    # Instead, try to detect connections that are no longer
+                    # usable (for example, closed unexpectedly) and remove
+                    # them from the cache.
                     if fetch.connection_cache is None:
                         h.close()
+                    elif isinstance(err, OSError) and err.errno == errno.EBADF:
+                        # This happens when the server closes the connection despite the Keep-Alive.
+                        # Apparently urllib then uses the file descriptor, expecting it to be
+                        # connected, when in reality the connection is already gone.
+                        # We let the request fail and expect it to be
+                        # tried once more ("try_again" in check_status()),
+                        # with the dead connection removed from the cache.
+                        # If it still fails, we give up, which can happen with bad
+                        # HTTP proxy settings.
+                        fetch.connection_cache.remove_connection(h.host, h.port)
                     raise urllib.error.URLError(err)
                 else:
                     try:
@@ -238,6 +250,7 @@ class Wget(FetchMethod):
                         return ""
                     def close(self):
                         pass
+                    closed = False
 
                 resp = addinfourl(fp_dummy(), r.msg, req.get_full_url())
                 resp.code = r.status
@@ -271,11 +284,6 @@ class Wget(FetchMethod):
             """
             http_error_403 = http_error_405
 
-            """
-            Some servers (e.g. FusionForge) returns 406 Not Acceptable when they
-            actually mean 405 Method Not Allowed.
-            """
-            http_error_406 = http_error_405
 
         class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
             """
@@ -304,14 +312,29 @@ class Wget(FetchMethod):
             uri = ud.url.split(";")[0]
             r = urllib.request.Request(uri)
             r.get_method = lambda: "HEAD"
-
-            if ud.user:
+            # Some servers (FusionForge, as used on Alioth) require that the
+            # optional Accept header is set.
+            r.add_header("Accept", "*/*")
+            def add_basic_auth(login_str, request):
+                '''Adds Basic auth to http request, pass in login:password as string'''
                 import base64
-                encodeuser = base64.b64encode(ud.user.encode('utf-8')).decode("utf-8")
+                encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8")
                 authheader =  "Basic %s" % encodeuser
                 r.add_header("Authorization", authheader)
 
-            opener.open(r)
+            if ud.user:
+                add_basic_auth(ud.user, r)
+
+            try:
+                import netrc, urllib.parse
+                n = netrc.netrc()
+                login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
+                add_basic_auth("%s:%s" % (login, password), r)
+            except (TypeError, ImportError, IOError, netrc.NetrcParseError):
+                pass
+
+            with opener.open(r) as response:
+                pass
         except urllib.error.URLError as e:
             if try_again:
                 logger.debug(2, "checkstatus: trying again")
@@ -398,17 +421,16 @@ class Wget(FetchMethod):
         Run fetch checkstatus to get directory information
         """
         f = tempfile.NamedTemporaryFile()
+        with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
+            agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
+            fetchcmd = self.basecmd
+            fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
+            try:
+                self._runwget(ud, d, fetchcmd, True, workdir=workdir)
+                fetchresult = f.read()
+            except bb.fetch2.BBFetchException:
+                fetchresult = ""
 
-        agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
-        fetchcmd = self.basecmd
-        fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
-        try:
-            self._runwget(ud, d, fetchcmd, True)
-            fetchresult = f.read()
-        except bb.fetch2.BBFetchException:
-            fetchresult = ""
-
-        f.close()
         return fetchresult
 
     def _check_latest_version(self, url, package, package_regex, current_version, ud, d):
@@ -535,7 +557,7 @@ class Wget(FetchMethod):
 
         # src.rpm extension was added only for rpm package. Can be removed if the rpm
         # packaged will always be considered as having to be manually upgraded
-        psuffix_regex = "(tar\.gz|tgz|tar\.bz2|zip|xz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
+        psuffix_regex = "(tar\.gz|tgz|tar\.bz2|zip|xz|tar\.lz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
 
         # match name, version and archive type of a package
         package_regex_comp = re.compile("(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
@@ -543,7 +565,7 @@ class Wget(FetchMethod):
         self.suffix_regex_comp = re.compile(psuffix_regex)
 
         # compile regex, can be specific by package or generic regex
-        pn_regex = d.getVar('UPSTREAM_CHECK_REGEX', True)
+        pn_regex = d.getVar('UPSTREAM_CHECK_REGEX')
         if pn_regex:
             package_custom_regex_comp = re.compile(pn_regex)
         else:
@@ -564,7 +586,7 @@ class Wget(FetchMethod):
         sanity check to ensure same name and type.
         """
         package = ud.path.split("/")[-1]
-        current_version = ['', d.getVar('PV', True), '']
+        current_version = ['', d.getVar('PV'), '']
 
         """possible to have no version in pkg name, such as spectrum-fw"""
         if not re.search("\d+", package):
@@ -579,7 +601,7 @@ class Wget(FetchMethod):
         bb.debug(3, "latest_versionstring, regex: %s" % (package_regex.pattern))
 
         uri = ""
-        regex_uri = d.getVar("UPSTREAM_CHECK_URI", True)
+        regex_uri = d.getVar("UPSTREAM_CHECK_URI")
         if not regex_uri:
             path = ud.path.split(package)[0]
 
@@ -588,7 +610,7 @@ class Wget(FetchMethod):
             dirver_regex = re.compile("(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
             m = dirver_regex.search(path)
             if m:
-                pn = d.getVar('PN', True)
+                pn = d.getVar('PN')
                 dirver = m.group('dirver')
 
                 dirver_pn_regex = re.compile("%s\d?" % (re.escape(pn)))
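
Besides the dead-connection eviction and the mandatory Accept header noted in the hunks above, checkstatus() now also falls back to credentials from ~/.netrc. The lookup pattern, with a hypothetical host; authenticators() returns None when no entry matches, which the fetcher swallows via the TypeError raised on unpacking:

    import netrc
    import urllib.parse

    uri = "https://downloads.example.com/foo.tar.gz"  # illustrative
    auth = netrc.netrc().authenticators(urllib.parse.urlparse(uri).hostname)
    if auth:
        login, _, password = auth  # fed into HTTP Basic auth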

+ 161 - 172
bitbake/lib/bb/main.py

@@ -28,6 +28,8 @@ import logging
 import optparse
 import warnings
 import fcntl
+import time
+import traceback
 
 import bb
 from bb import event
@@ -37,11 +39,17 @@ from bb import ui
 from bb import server
 from bb import cookerdata
 
+import bb.server.process
+import bb.server.xmlrpcclient
+
 logger = logging.getLogger("BitBake")
 
 class BBMainException(Exception):
     pass
 
+class BBMainFatal(bb.BBHandledException):
+    pass
+
 def present_options(optionlist):
     if len(optionlist) > 1:
         return ' or '.join([', '.join(optionlist[:-1]), optionlist[-1]])
@@ -58,9 +66,6 @@ class BitbakeHelpFormatter(optparse.IndentedHelpFormatter):
         if option.dest == 'ui':
             valid_uis = list_extension_modules(bb.ui, 'main')
             option.help = option.help.replace('@CHOICES@', present_options(valid_uis))
-        elif option.dest == 'servertype':
-            valid_server_types = list_extension_modules(bb.server, 'BitBakeServer')
-            option.help = option.help.replace('@CHOICES@', present_options(valid_server_types))
 
         return optparse.IndentedHelpFormatter.format_option(self, option)
 
@@ -148,11 +153,6 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
                                "failed and anything depending on it cannot be built, as much as "
                                "possible will be built before stopping.")
 
-        parser.add_option("-a", "--tryaltconfigs", action="store_true",
-                          dest="tryaltconfigs", default=False,
-                          help="Continue with builds by trying to use alternative providers "
-                               "where possible.")
-
         parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
                           help="Force the specified targets/task to run (invalidating any "
                                "existing stamp file).")
@@ -174,13 +174,24 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
                           help="Read the specified file after bitbake.conf.")
 
         parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
-                          help="Output more log message data to the terminal.")
+                          help="Enable tracing of shell tasks (with 'set -x'). "
+                               "Also print bb.note(...) messages to stdout (in "
+                               "addition to writing them to ${T}/log.do_<task>).")
 
         parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
-                          help="Increase the debug level. You can specify this more than once.")
-
-        parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,
-                          help="Output less log message data to the terminal.")
+                          help="Increase the debug level. You can specify this "
+                               "more than once. -D sets the debug level to 1, "
+                               "where only bb.debug(1, ...) messages are printed "
+                               "to stdout; -DD sets the debug level to 2, where "
+                               "both bb.debug(1, ...) and bb.debug(2, ...) "
+                               "messages are printed; etc. Without -D, no debug "
+                               "messages are printed. Note that -D only affects "
+                               "output to stdout. All debug messages are written "
+                               "to ${T}/log.do_taskname, regardless of the debug "
+                               "level.")
+
+        parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
+                          help="Output less log message data to the terminal. You can specify this more than once.")
 
         parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
                           help="Don't execute, just go through the motions.")
@@ -227,11 +238,6 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
                           default=os.environ.get('BITBAKE_UI', 'knotty'),
                           help="The user interface to use (@CHOICES@ - default %default).")
 
-        # @CHOICES@ is substituted out by BitbakeHelpFormatter above
-        parser.add_option("-t", "--servertype", action="store", dest="servertype",
-                          default=["process", "xmlrpc"]["BBSERVER" in os.environ],
-                          help="Choose which server type to use (@CHOICES@ - default %default).")
-
         parser.add_option("", "--token", action="store", dest="xmlrpctoken",
                           default=os.environ.get("BBTOKEN"),
                           help="Specify the connection token to be used when connecting "
@@ -247,15 +253,14 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
                           help="Run bitbake without a UI, only starting a server "
                                "(cooker) process.")
 
-        parser.add_option("", "--foreground", action="store_true",
-                          help="Run bitbake server in foreground.")
-
         parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
-                          help="The name/address for the bitbake server to bind to.")
+                          help="The name/address for the bitbake xmlrpc server to bind to.")
 
-        parser.add_option("-T", "--idle-timeout", type=int,
-                          default=int(os.environ.get("BBTIMEOUT", "0")),
-                          help="Set timeout to unload bitbake server due to inactivity")
+        parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
+                          default=os.getenv("BB_SERVER_TIMEOUT"),
+                          help="Set timeout to unload bitbake server due to inactivity; "
+                                "set to -1 to disable unloading. "
+                                "Default: the BB_SERVER_TIMEOUT environment variable.")
 
         parser.add_option("", "--no-setscene", action="store_true",
                           dest="nosetscene", default=False,
@@ -272,7 +277,7 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
 
         parser.add_option("-m", "--kill-server", action="store_true",
                           dest="kill_server", default=False,
-                          help="Terminate the remote server.")
+                          help="Terminate any running bitbake server.")
 
         parser.add_option("", "--observe-only", action="store_true",
                           dest="observe_only", default=False,
@@ -287,6 +292,9 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
                           help="Writes the event log of the build to a bitbake event json file. "
                                "Use '' (empty string) to assign the name automatically.")
 
+        parser.add_option("", "--runall", action="store", dest="runall",
+                          help="Run the specified task for all build targets and their dependencies.")
+
         options, targets = parser.parse_args(argv)
 
         if options.quiet and options.verbose:
@@ -308,69 +316,20 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
             eventlog = "bitbake_eventlog_%s.json" % datetime.now().strftime("%Y%m%d%H%M%S")
             options.writeeventlog = eventlog
 
-        # if BBSERVER says to autodetect, let's do that
-        if options.remote_server:
-            port = -1
-            if options.remote_server != 'autostart':
-                host, port = options.remote_server.split(":", 2)
+        if options.bind:
+            try:
+                # Check that the bind value is ':'-delimited and the port is a number
+                (host, port) = options.bind.split(':')
                 port = int(port)
-            # use automatic port if port set to -1, means read it from
-            # the bitbake.lock file; this is a bit tricky, but we always expect
-            # to be in the base of the build directory if we need to have a
-            # chance to start the server later, anyway
-            if port == -1:
-                lock_location = "./bitbake.lock"
-                # we try to read the address at all times; if the server is not started,
-                # we'll try to start it after the first connect fails, below
-                try:
-                    lf = open(lock_location, 'r')
-                    remotedef = lf.readline()
-                    [host, port] = remotedef.split(":")
-                    port = int(port)
-                    lf.close()
-                    options.remote_server = remotedef
-                except Exception as e:
-                    if options.remote_server != 'autostart':
-                        raise BBMainException("Failed to read bitbake.lock (%s), invalid port" % str(e))
+            except (ValueError,IndexError):
+                raise BBMainException("FATAL: Malformed host:port bind parameter")
+            options.xmlrpcinterface = (host, port)
+        else:
+            options.xmlrpcinterface = (None, 0)
 
         return options, targets[1:]
 
 
-def start_server(servermodule, configParams, configuration, features):
-    server = servermodule.BitBakeServer()
-    single_use = not configParams.server_only and os.getenv('BBSERVER') != 'autostart'
-    if configParams.bind:
-        (host, port) = configParams.bind.split(':')
-        server.initServer((host, int(port)), single_use=single_use,
-                          idle_timeout=configParams.idle_timeout)
-        configuration.interface = [server.serverImpl.host, server.serverImpl.port]
-    else:
-        server.initServer(single_use=single_use)
-        configuration.interface = []
-
-    try:
-        configuration.setServerRegIdleCallback(server.getServerIdleCB())
-
-        cooker = bb.cooker.BBCooker(configuration, features)
-
-        server.addcooker(cooker)
-        server.saveConnectionDetails()
-    except Exception as e:
-        while hasattr(server, "event_queue"):
-            import queue
-            try:
-                event = server.event_queue.get(block=False)
-            except (queue.Empty, IOError):
-                break
-            if isinstance(event, logging.LogRecord):
-                logger.handle(event)
-        raise
-    if not configParams.foreground:
-        server.detach()
-    cooker.lock.close()
-    return server
-
-
 def bitbake_main(configParams, configuration):
 
     # Python multiprocessing requires /dev/shm on Linux
@@ -389,44 +348,17 @@ def bitbake_main(configParams, configuration):
     except:
         pass
 
-
     configuration.setConfigParameters(configParams)
 
-    ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
-    servermodule = import_extension_module(bb.server, configParams.servertype, 'BitBakeServer')
-
-    if configParams.server_only:
-        if configParams.servertype != "xmlrpc":
-            raise BBMainException("FATAL: If '--server-only' is defined, we must set the "
-                                  "servertype as 'xmlrpc'.\n")
-        if not configParams.bind:
-            raise BBMainException("FATAL: The '--server-only' option requires a name/address "
-                                  "to bind to with the -B option.\n")
-        if configParams.remote_server:
+    if configParams.server_only and configParams.remote_server:
             raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" %
                                   ("the BBSERVER environment variable" if "BBSERVER" in os.environ \
                                    else "the '--remote-server' option"))
 
-    elif configParams.foreground:
-        raise BBMainException("FATAL: The '--foreground' option can only be used "
-                              "with --server-only.\n")
-
-    if configParams.bind and configParams.servertype != "xmlrpc":
-        raise BBMainException("FATAL: If '-B' or '--bind' is defined, we must "
-                              "set the servertype as 'xmlrpc'.\n")
-
-    if configParams.remote_server and configParams.servertype != "xmlrpc":
-        raise BBMainException("FATAL: If '--remote-server' is defined, we must "
-                              "set the servertype as 'xmlrpc'.\n")
-
-    if configParams.observe_only and (not configParams.remote_server or configParams.bind):
+    if configParams.observe_only and not (configParams.remote_server or configParams.bind):
         raise BBMainException("FATAL: '--observe-only' can only be used by UI clients "
                               "connecting to a server.\n")
 
-    if configParams.kill_server and not configParams.remote_server:
-        raise BBMainException("FATAL: '--kill-server' can only be used to "
-                              "terminate a remote server")
-
     if "BBDEBUG" in os.environ:
         level = int(os.environ["BBDEBUG"])
         if level > configuration.debug:
@@ -435,6 +367,34 @@ def bitbake_main(configParams, configuration):
     bb.msg.init_msgconfig(configParams.verbose, configuration.debug,
                           configuration.debug_domains)
 
+    server_connection, ui_module = setup_bitbake(configParams, configuration)
+    # No server connection
+    if server_connection is None:
+        if configParams.status_only:
+            return 1
+        if configParams.kill_server:
+            return 0
+
+    if not configParams.server_only:
+        if configParams.status_only:
+            server_connection.terminate()
+            return 0
+
+        try:
+            for event in bb.event.ui_queue:
+                server_connection.events.queue_event(event)
+            bb.event.ui_queue = []
+
+            return ui_module.main(server_connection.connection, server_connection.events,
+                                  configParams)
+        finally:
+            server_connection.terminate()
+    else:
+        return 0
+
+    return 1
+
+def setup_bitbake(configParams, configuration, extrafeatures=None):
     # Ensure logging messages get sent to the UI as events
     handler = bb.event.LogHandler()
     if not configParams.status_only:
@@ -444,72 +404,101 @@ def bitbake_main(configParams, configuration):
     # Clear away any spurious environment variables while we stoke up the cooker
     cleanedvars = bb.utils.clean_environment()
 
-    featureset = []
-    if not configParams.server_only:
-        # Collect the feature set for the UI
-        featureset = getattr(ui_module, "featureSet", [])
-
     if configParams.server_only:
-        for param in ('prefile', 'postfile'):
-            value = getattr(configParams, param)
-            if value:
-                setattr(configuration, "%s_server" % param, value)
-                param = "%s_server" % param
-
-    if not configParams.remote_server:
-        # we start a server with a given configuration
-        server = start_server(servermodule, configParams, configuration, featureset)
-        bb.event.ui_queue = []
+        featureset = []
+        ui_module = None
     else:
-        if os.getenv('BBSERVER') == 'autostart':
-            if configParams.remote_server == 'autostart' or \
-               not servermodule.check_connection(configParams.remote_server, timeout=2):
-                configParams.bind = 'localhost:0'
-                srv = start_server(servermodule, configParams, configuration, featureset)
-                configParams.remote_server = '%s:%d' % tuple(configuration.interface)
-                bb.event.ui_queue = []
-
-        # we start a stub server that is actually a XMLRPClient that connects to a real server
-        server = servermodule.BitBakeXMLRPCClient(configParams.observe_only,
-                                                  configParams.xmlrpctoken)
-        server.saveConnectionDetails(configParams.remote_server)
-
-
-    if not configParams.server_only:
-        try:
-            server_connection = server.establishConnection(featureset)
-        except Exception as e:
-            bb.fatal("Could not connect to server %s: %s" % (configParams.remote_server, str(e)))
+        ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
+        # Collect the feature set for the UI
+        featureset = getattr(ui_module, "featureSet", [])
 
-        if configParams.kill_server:
-            server_connection.connection.terminateServer()
-            bb.event.ui_queue = []
-            return 0
+    if extrafeatures:
+        for feature in extrafeatures:
+            if not feature in featureset:
+                featureset.append(feature)
 
-        server_connection.setupEventQueue()
+    server_connection = None
 
-        # Restore the environment in case the UI needs it
-        for k in cleanedvars:
-            os.environ[k] = cleanedvars[k]
+    if configParams.remote_server:
+        # Connect to a remote XMLRPC server
+        server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset,
+                                                                 configParams.observe_only, configParams.xmlrpctoken)
+    else:
+        retries = 8
+        while retries:
+            try:
+                topdir, lock = lockBitbake()
+                sockname = topdir + "/bitbake.sock"
+                if lock:
+                    if configParams.status_only or configParams.kill_server:
+                        logger.info("bitbake server is not running.")
+                        lock.close()
+                        return None, None
+                    # we start a server with a given configuration
+                    logger.info("Starting bitbake server...")
+                    # Clear the event queue since we already displayed messages
+                    bb.event.ui_queue = []
+                    server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset)
+
+                else:
+                    logger.info("Reconnecting to bitbake server...")
+                    if not os.path.exists(sockname):
+                        print("Previous bitbake instance shutting down? Waiting to retry...")
+                        i = 0
+                        lock = None
+                        # Wait for 5s or until we can get the lock
+                        while not lock and i < 50:
+                            time.sleep(0.1)
+                            _, lock = lockBitbake()
+                            i += 1
+                        if lock:
+                            bb.utils.unlockfile(lock)
+                        raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?")
+                if not configParams.server_only:
+                    try:
+                        server_connection = bb.server.process.connectProcessServer(sockname, featureset)
+                    except EOFError:
+                        # The server may have been shutting down but not closed the socket yet. If that happened,
+                        # ignore it.
+                        pass
+
+                if server_connection or configParams.server_only:
+                    break
+            except BBMainFatal:
+                raise
+            except (Exception, bb.server.process.ProcessTimeout) as e:
+                if not retries:
+                    raise
+                retries -= 1
+                if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)):
+                    logger.info("Retrying server connection...")
+                else:
+                    logger.info("Retrying server connection... (%s)" % traceback.format_exc())
+            if not retries:
+                bb.fatal("Unable to connect to bitbake server, or start one")
+            if retries < 5:
+                time.sleep(5)
+
+    if configParams.kill_server:
+        server_connection.connection.terminateServer()
+        server_connection.terminate()
+        bb.event.ui_queue = []
+        logger.info("Terminated bitbake server.")
+        return None, None
 
-        logger.removeHandler(handler)
+    # Restore the environment in case the UI needs it
+    for k in cleanedvars:
+        os.environ[k] = cleanedvars[k]
 
+    logger.removeHandler(handler)
 
-        if configParams.status_only:
-            server_connection.terminate()
-            return 0
+    return server_connection, ui_module
 
-        try:
-            return ui_module.main(server_connection.connection, server_connection.events,
-                                  configParams)
-        finally:
-            bb.event.ui_queue = []
-            server_connection.terminate()
-    else:
-        print("Bitbake server address: %s, server port: %s" % (server.serverImpl.host,
-                                                               server.serverImpl.port))
-        if configParams.foreground:
-            server.serverImpl.serve_forever()
-        return 0
+def lockBitbake():
+    topdir = bb.cookerdata.findTopdir()
+    if not topdir:
+        bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. BBPATH is unset and/or not in a build directory?")
+        raise BBMainFatal
+    lockfile = topdir + "/bitbake.lock"
+    return topdir, bb.utils.lockfile(lockfile, False, False)
 
-    return 1
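
The rewritten startup path drops the process/xmlrpc server split entirely: there is one server kind, and clients coordinate through a lock file plus a unix socket in the build directory. Whoever acquires bitbake.lock starts the server; everyone else connects to the socket, with up to eight retries. Condensed from setup_bitbake() above (retries and error handling elided):

    topdir, lock = lockBitbake()
    sockname = topdir + "/bitbake.sock"
    if lock:
        # Lock acquired: no server is running, so start one.
        server = bb.server.process.BitBakeServer(lock, sockname,
                                                 configuration, featureset)
    # Whether we just started it or not, talk to it over the socket.
    server_connection = bb.server.process.connectProcessServer(sockname, featureset)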

+ 16 - 11
bitbake/lib/bb/monitordisk.py

@@ -129,7 +129,7 @@ def getDiskData(BBDirs, configuration):
             bb.utils.mkdirhier(path)
         dev = getMountedDev(path)
         # Use path/action as the key
-        devDict[os.path.join(path, action)] = [dev, minSpace, minInode]
+        devDict[(path, action)] = [dev, minSpace, minInode]
 
     return devDict
 
@@ -141,7 +141,7 @@ def getInterval(configuration):
     spaceDefault = 50 * 1024 * 1024
     inodeDefault = 5 * 1024
 
-    interval = configuration.getVar("BB_DISKMON_WARNINTERVAL", True)
+    interval = configuration.getVar("BB_DISKMON_WARNINTERVAL")
     if not interval:
         return spaceDefault, inodeDefault
     else:
@@ -179,7 +179,7 @@ class diskMonitor:
         self.enableMonitor = False
         self.configuration = configuration
 
-        BBDirs = configuration.getVar("BB_DISKMON_DIRS", True) or None
+        BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None
         if BBDirs:
             self.devDict = getDiskData(BBDirs, configuration)
             if self.devDict:
@@ -205,18 +205,21 @@ class diskMonitor:
         """ Take action for the monitor """
 
         if self.enableMonitor:
-            for k in self.devDict:
-                path = os.path.dirname(k)
-                action = os.path.basename(k)
-                dev = self.devDict[k][0]
-                minSpace = self.devDict[k][1]
-                minInode = self.devDict[k][2]
+            diskUsage = {}
+            for k, attributes in self.devDict.items():
+                path, action = k
+                dev, minSpace, minInode = attributes
 
                 st = os.statvfs(path)
 
-                # The free space, float point number
+                # The available free space, integer number
                 freeSpace = st.f_bavail * st.f_frsize
 
+                # Send all relevant information in the event.
+                freeSpaceRoot = st.f_bfree * st.f_frsize
+                totalSpace = st.f_blocks * st.f_frsize
+                diskUsage[dev] = bb.event.DiskUsageSample(freeSpace, freeSpaceRoot, totalSpace)
+
                 if minSpace and freeSpace < minSpace:
                     # Always show warning, the self.checked would always be False if the action is WARN
                     if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]:
@@ -235,7 +238,7 @@ class diskMonitor:
                         rq.finish_runqueue(True)
                         bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)
 
-                # The free inodes, float point number
+                # The free inodes, integer number
                 freeInode = st.f_favail
 
                 if minInode and freeInode < minInode:
@@ -260,4 +263,6 @@ class diskMonitor:
                         self.checked[k] = True
                         rq.finish_runqueue(True)
                         bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)
+
+            bb.event.fire(bb.event.MonitorDiskEvent(diskUsage), self.configuration)
         return
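
The monitor now keys its device table by a (path, action) tuple instead of a joined path string, and each poll additionally fires a MonitorDiskEvent carrying per-device DiskUsageSample values. All of the numbers come straight from os.statvfs; for reference:

    import os

    st = os.statvfs("/srv/build/tmp")        # path is illustrative
    free_space  = st.f_bavail * st.f_frsize  # bytes usable by unprivileged users
    free_root   = st.f_bfree * st.f_frsize   # bytes free including root's reserve
    total_space = st.f_blocks * st.f_frsize  # total size of the filesystem
    free_inodes = st.f_favail                # inodes available to users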

+ 22 - 0
bitbake/lib/bb/msg.py

@@ -201,3 +201,25 @@ def fatal(msgdomain, msg):
         logger = logging.getLogger("BitBake")
     logger.critical(msg)
     sys.exit(1)
+
+def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers=False, color='auto'):
+    """Standalone logger creation function"""
+    logger = logging.getLogger(name)
+    console = logging.StreamHandler(output)
+    format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
+    if color == 'always' or (color == 'auto' and output.isatty()):
+        format.enable_color()
+    console.setFormatter(format)
+    if preserve_handlers:
+        logger.addHandler(console)
+    else:
+        logger.handlers = [console]
+    logger.setLevel(level)
+    return logger
+
+def has_console_handler(logger):
+    for handler in logger.handlers:
+        if isinstance(handler, logging.StreamHandler):
+            if handler.stream in [sys.stderr, sys.stdout]:
+                return True
+    return False
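
logger_create() gives standalone tools (bitbake-layers and friends) the same formatting and optional colour as the main UI without duplicating handler setup. A plausible call site; the tool name and message are illustrative:

    import sys
    import logging
    import bb.msg

    logger = bb.msg.logger_create('bitbake-layers', output=sys.stdout,
                                  level=logging.INFO, color='auto')
    logger.info("colour is enabled automatically when stdout is a tty")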

+ 5 - 1
bitbake/lib/bb/parse/__init__.py

@@ -84,6 +84,10 @@ def update_cache(f):
         logger.debug(1, "Updating mtime cache for %s" % f)
         update_mtime(f)
 
+def clear_cache():
+    global __mtime_cache
+    __mtime_cache = {}
+
 def mark_dependency(d, f):
     if f.startswith('./'):
         f = "%s/%s" % (os.getcwd(), f[2:])
@@ -123,7 +127,7 @@ def init_parser(d):
 
 def resolve_file(fn, d):
     if not os.path.isabs(fn):
-        bbpath = d.getVar("BBPATH", True)
+        bbpath = d.getVar("BBPATH")
         newfn, attempts = bb.utils.which(bbpath, fn, history=True)
         for af in attempts:
             mark_dependency(d, af)

+ 5 - 74
bitbake/lib/bb/parse/ast.py

@@ -30,8 +30,6 @@ import itertools
 from bb import methodpool
 from bb.parse import logger
 
-_bbversions_re = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")
-
 class StatementGroup(list):
     def eval(self, data):
         for statement in self:
@@ -132,7 +130,6 @@ class DataNode(AstNode):
                 val = groupd["value"]
         elif "colon" in groupd and groupd["colon"] != None:
             e = data.createCopy()
-            bb.data.update_data(e)
             op = "immediate"
             val = e.expand(groupd["value"], key + "[:=]")
         elif "append" in groupd and groupd["append"] != None:
@@ -347,19 +344,18 @@ def finalize(fn, d, variant = None):
         if not handlerfn:
             bb.fatal("Undefined event handler function '%s'" % var)
         handlerln = int(d.getVarFlag(var, "lineno", False))
-        bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask", True) or "").split(), handlerfn, handlerln)
+        bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
 
     bb.event.fire(bb.event.RecipePreFinalise(fn), d)
 
     bb.data.expandKeys(d)
-    bb.data.update_data(d)
     code = []
     for funcname in d.getVar("__BBANONFUNCS", False) or []:
         code.append("%s(d)" % funcname)
     bb.utils.better_exec("\n".join(code), {"d": d})
-    bb.data.update_data(d)
 
     tasklist = d.getVar('__BBTASKS', False) or []
+    bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
     bb.build.add_tasks(tasklist, d)
 
     bb.parse.siggen.finalise(fn, d, variant)
@@ -385,29 +381,8 @@ def _create_variants(datastores, names, function, onlyfinalise):
             else:
                 create_variant("%s-%s" % (variant, name), datastores[variant], name)
 
-def _expand_versions(versions):
-    def expand_one(version, start, end):
-        for i in range(start, end + 1):
-            ver = _bbversions_re.sub(str(i), version, 1)
-            yield ver
-
-    versions = iter(versions)
-    while True:
-        try:
-            version = next(versions)
-        except StopIteration:
-            break
-
-        range_ver = _bbversions_re.search(version)
-        if not range_ver:
-            yield version
-        else:
-            newversions = expand_one(version, int(range_ver.group("from")),
-                                     int(range_ver.group("to")))
-            versions = itertools.chain(newversions, versions)
-
 def multi_finalize(fn, d):
-    appends = (d.getVar("__BBAPPEND", True) or "").split()
+    appends = (d.getVar("__BBAPPEND") or "").split()
     for append in appends:
         logger.debug(1, "Appending .bbappend file %s to %s", append, fn)
         bb.parse.BBHandler.handle(append, d, True)
@@ -422,51 +397,7 @@ def multi_finalize(fn, d):
         d.setVar("__SKIPPED", e.args[0])
     datastores = {"": safe_d}
 
-    versions = (d.getVar("BBVERSIONS", True) or "").split()
-    if versions:
-        pv = orig_pv = d.getVar("PV", True)
-        baseversions = {}
-
-        def verfunc(ver, d, pv_d = None):
-            if pv_d is None:
-                pv_d = d
-
-            overrides = d.getVar("OVERRIDES", True).split(":")
-            pv_d.setVar("PV", ver)
-            overrides.append(ver)
-            bpv = baseversions.get(ver) or orig_pv
-            pv_d.setVar("BPV", bpv)
-            overrides.append(bpv)
-            d.setVar("OVERRIDES", ":".join(overrides))
-
-        versions = list(_expand_versions(versions))
-        for pos, version in enumerate(list(versions)):
-            try:
-                pv, bpv = version.split(":", 2)
-            except ValueError:
-                pass
-            else:
-                versions[pos] = pv
-                baseversions[pv] = bpv
-
-        if pv in versions and not baseversions.get(pv):
-            versions.remove(pv)
-        else:
-            pv = versions.pop()
-
-            # This is necessary because our existing main datastore
-            # has already been finalized with the old PV, we need one
-            # that's been finalized with the new PV.
-            d = bb.data.createCopy(safe_d)
-            verfunc(pv, d, safe_d)
-            try:
-                finalize(fn, d)
-            except bb.parse.SkipRecipe as e:
-                d.setVar("__SKIPPED", e.args[0])
-
-        _create_variants(datastores, versions, verfunc, onlyfinalise)
-
-    extended = d.getVar("BBCLASSEXTEND", True) or ""
+    extended = d.getVar("BBCLASSEXTEND") or ""
     if extended:
         # the following is to support bbextends with arguments, for e.g. multilib
         # an example is as follows:
@@ -484,7 +415,7 @@ def multi_finalize(fn, d):
             else:
                 extendedmap[ext] = ext
 
-        pn = d.getVar("PN", True)
+        pn = d.getVar("PN")
         def extendfunc(name, d):
             if name != extendedmap[name]:
                 d.setVar("BBEXTENDCURR", extendedmap[name])

+ 13 - 13
bitbake/lib/bb/parse/parse_py/BBHandler.py

@@ -66,7 +66,7 @@ def inherit(files, fn, lineno, d):
             file = os.path.join('classes', '%s.bbclass' % file)
 
         if not os.path.isabs(file):
-            bbpath = d.getVar("BBPATH", True)
+            bbpath = d.getVar("BBPATH")
             abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
             for af in attempts:
                 if af != abs_fn:
@@ -87,17 +87,17 @@ def get_statements(filename, absolute_filename, base_name):
     try:
         return cached_statements[absolute_filename]
     except KeyError:
-        file = open(absolute_filename, 'r')
-        statements = ast.StatementGroup()
-
-        lineno = 0
-        while True:
-            lineno = lineno + 1
-            s = file.readline()
-            if not s: break
-            s = s.rstrip()
-            feeder(lineno, s, filename, base_name, statements)
-        file.close()
+        with open(absolute_filename, 'r') as f:
+            statements = ast.StatementGroup()
+
+            lineno = 0
+            while True:
+                lineno = lineno + 1
+                s = f.readline()
+                if not s: break
+                s = s.rstrip()
+                feeder(lineno, s, filename, base_name, statements)
+
         if __inpython__:
             # add a blank line to close out any python definition
             feeder(lineno, "", filename, base_name, statements, eof=True)
@@ -144,7 +144,7 @@ def handle(fn, d, include):
     try:
         statements.eval(d)
     except bb.parse.SkipRecipe:
-        bb.data.setVar("__SKIPPED", True, d)
+        d.setVar("__SKIPPED", True)
         if include == 0:
             return { "" : d }
 

+ 20 - 12
bitbake/lib/bb/parse/parse_py/ConfHandler.py

@@ -32,8 +32,8 @@ from bb.parse import ParseError, resolve_file, ast, logger, handle
 
 __config_regexp__  = re.compile( r"""
     ^
-    (?P<exp>export\s*)?
-    (?P<var>[a-zA-Z0-9\-~_+.${}/]+?)
+    (?P<exp>export\s+)?
+    (?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
     (\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
 
     \s* (
@@ -56,9 +56,9 @@ __config_regexp__  = re.compile( r"""
     """, re.X)
 __include_regexp__ = re.compile( r"include\s+(.+)" )
 __require_regexp__ = re.compile( r"require\s+(.+)" )
-__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/]+)$" )
-__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/]+)$" )
-__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/]+)\[([a-zA-Z0-9\-_+.${}/]+)\]$" )
+__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
+__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
+__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" )
 
 def init(data):
     topdir = data.getVar('TOPDIR', False)
@@ -69,30 +69,38 @@ def init(data):
 def supports(fn, d):
     return fn[-5:] == ".conf"
 
-def include(parentfn, fn, lineno, data, error_out):
+def include(parentfn, fns, lineno, data, error_out):
     """
     error_out: A string indicating the verb (e.g. "include", "inherit") to be
     used in a ParseError that will be raised if the file to be included could
     not be included. Specify False to avoid raising an error in this case.
     """
+    fns = data.expand(fns)
+    parentfn = data.expand(parentfn)
+
+    # "include" or "require" accept zero to n space-separated file names to include.
+    for fn in fns.split():
+        include_single_file(parentfn, fn, lineno, data, error_out)
+
+def include_single_file(parentfn, fn, lineno, data, error_out):
+    """
+    Helper function for include() which does not expand or split its parameters.
+    """
     if parentfn == fn: # prevent infinite recursion
         return None
 
-    fn = data.expand(fn)
-    parentfn = data.expand(parentfn)
-
     if not os.path.isabs(fn):
         dname = os.path.dirname(parentfn)
-        bbpath = "%s:%s" % (dname, data.getVar("BBPATH", True))
+        bbpath = "%s:%s" % (dname, data.getVar("BBPATH"))
         abs_fn, attempts = bb.utils.which(bbpath, fn, history=True)
         if abs_fn and bb.parse.check_dependency(data, abs_fn):
-            logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE', True)))
+            logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE')))
         for af in attempts:
             bb.parse.mark_dependency(data, af)
         if abs_fn:
             fn = abs_fn
     elif bb.parse.check_dependency(data, fn):
-        logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE', True)))
+        logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE')))
 
     try:
         bb.parse.handle(fn, data, True)

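With the include()/include_single_file() split above, one include or require directive may now name several files, each expanded and resolved independently. A self-contained sketch of the new control flow, with expansion and the per-file handler stubbed out (the file names and ${MACHINE} value are made up):

def expand(s):
    return s.replace("${MACHINE}", "qemux86")   # stand-in for data.expand()

def include_single_file(fn):
    print("including", fn)

def include(fns):
    # One directive, zero or more space-separated file names.
    for fn in expand(fns).split():
        include_single_file(fn)

include("conf/site.conf conf/${MACHINE}.conf")
# including conf/site.conf
# including conf/qemux86.conf
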
+ 3 - 7
bitbake/lib/bb/persist_data.py

@@ -28,11 +28,7 @@ import sys
 import warnings
 from bb.compat import total_ordering
 from collections import Mapping
-
-try:
-    import sqlite3
-except ImportError:
-    from pysqlite2 import dbapi2 as sqlite3
+import sqlite3
 
 sqlversion = sqlite3.sqlite_version_info
 if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
@@ -207,8 +203,8 @@ def connect(database):
 def persist(domain, d):
     """Convenience factory for SQLTable objects based upon metadata"""
     import bb.utils
-    cachedir = (d.getVar("PERSISTENT_DIR", True) or
-                d.getVar("CACHE", True))
+    cachedir = (d.getVar("PERSISTENT_DIR") or
+                d.getVar("CACHE"))
     if not cachedir:
         logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
         sys.exit(1)

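persist() above hands callers a dict-like table backed by SQLite. A minimal, self-contained sketch of that idea; the real SQLTable adds locking, retries and iteration on top:

import sqlite3

class TinyTable:
    """Dict-style wrapper around a single key/value SQLite table."""
    def __init__(self, path, table):
        self.db = sqlite3.connect(path)
        self.table = table
        self.db.execute("CREATE TABLE IF NOT EXISTS %s "
                        "(key TEXT PRIMARY KEY NOT NULL, value TEXT)" % table)

    def __setitem__(self, key, value):
        self.db.execute("REPLACE INTO %s (key, value) VALUES (?, ?)"
                        % self.table, (key, value))
        self.db.commit()

    def __getitem__(self, key):
        row = self.db.execute("SELECT value FROM %s WHERE key=?"
                              % self.table, (key,)).fetchone()
        if row is None:
            raise KeyError(key)
        return row[0]

t = TinyTable(":memory:", "demo")
t["last-revision"] = "abc123"
print(t["last-revision"])   # abc123
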
+ 45 - 38
bitbake/lib/bb/process.py

@@ -94,45 +94,52 @@ def _logged_communicate(pipe, log, input, extrafiles):
                 if data is not None:
                     func(data)
 
+    def read_all_pipes(log, rin, outdata, errdata):
+        rlist = rin
+        stdoutbuf = b""
+        stderrbuf = b""
+
+        try:
+            r, w, e = select.select(rlist, [], [], 1)
+        except OSError as e:
+            if e.errno != errno.EINTR:
+                raise
+            return  # interrupted by a signal; nothing readable this round
+
+        readextras(r)
+
+        if pipe.stdout in r:
+            data = stdoutbuf + pipe.stdout.read()
+            if data is not None and len(data) > 0:
+                try:
+                    data = data.decode("utf-8")
+                    outdata.append(data)
+                    log.write(data)
+                    log.flush()
+                    stdoutbuf = b""
+                except UnicodeDecodeError:
+                    stdoutbuf = data
+
+        if pipe.stderr in r:
+            data = stderrbuf + pipe.stderr.read()
+            if data is not None and len(data) > 0:
+                try:
+                    data = data.decode("utf-8")
+                    errdata.append(data)
+                    log.write(data)
+                    log.flush()
+                    stderrbuf = b""
+                except UnicodeDecodeError:
+                    stderrbuf = data
+
     try:
+        # Read all pipes while the process is open
         while pipe.poll() is None:
-            rlist = rin
-            stdoutbuf = b""
-            stderrbuf = b""
-            try:
-                r,w,e = select.select (rlist, [], [], 1)
-            except OSError as e:
-                if e.errno != errno.EINTR:
-                    raise
-
-            if pipe.stdout in r:
-                data = stdoutbuf + pipe.stdout.read()
-                if data is not None and len(data) > 0:
-                    try:
-                        data = data.decode("utf-8")
-                        outdata.append(data)
-                        log.write(data)
-                        stdoutbuf = b""
-                    except UnicodeDecodeError:
-                        stdoutbuf = data
-
-            if pipe.stderr in r:
-                data = stderrbuf + pipe.stderr.read()
-                if data is not None and len(data) > 0:
-                    try:
-                        data = data.decode("utf-8")
-                        errdata.append(data)
-                        log.write(data)
-                        stderrbuf = b""
-                    except UnicodeDecodeError:
-                        stderrbuf = data
-
-            readextras(r)
-
-    finally:    
-        log.flush()
+            read_all_pipes(log, rin, outdata, errdata)
 
-    readextras([fobj for fobj, _ in extrafiles])
+        # Process closed, drain all pipes...
+        read_all_pipes(log, rin, outdata, errdata)
+    finally:
+        log.flush()
 
     if pipe.stdout is not None:
         pipe.stdout.close()
@@ -162,9 +169,9 @@ def run(cmd, input=None, log=None, extrafiles=None, **options):
         stdout, stderr = _logged_communicate(pipe, log, input, extrafiles)
     else:
         stdout, stderr = pipe.communicate(input)
-        if stdout:
+        if stdout is not None:
             stdout = stdout.decode("utf-8")
-        if stderr:
+        if stderr is not None:
             stderr = stderr.decode("utf-8")
 
     if pipe.returncode != 0:

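The run() hunk above swaps the decode guard from truthiness to an explicit None check. communicate() returns b"" (not None) for a process that printed nothing, and b"" is falsy, so the old guard skipped the decode and handed callers bytes:

out = b""            # what communicate() returns for empty output
if out:              # falsy, so the old guard skipped the decode
    out = out.decode("utf-8")
print(type(out))     # <class 'bytes'>

out = b""
if out is not None:  # new guard: empty output is still decoded
    out = out.decode("utf-8")
print(type(out))     # <class 'str'>
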
+ 9 - 10
bitbake/lib/bb/providers.py

@@ -48,7 +48,6 @@ def findProviders(cfgData, dataCache, pkg_pn = None):
 
     # Need to ensure data store is expanded
     localdata = data.createCopy(cfgData)
-    bb.data.update_data(localdata)
     bb.data.expandKeys(localdata)
 
     preferred_versions = {}
@@ -123,11 +122,11 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
 
     # pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot
     # hence we do this manually rather than use OVERRIDES
-    preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn, True)
+    preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn)
     if not preferred_v:
-        preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn, True)
+        preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn)
     if not preferred_v:
-        preferred_v = cfgData.getVar("PREFERRED_VERSION", True)
+        preferred_v = cfgData.getVar("PREFERRED_VERSION")
 
     if preferred_v:
         m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
@@ -245,17 +244,17 @@ def _filterProviders(providers, item, cfgData, dataCache):
             pkg_pn[pn] = []
         pkg_pn[pn].append(p)
 
-    logger.debug(1, "providers for %s are: %s", item, list(pkg_pn.keys()))
+    logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
 
     # First add PREFERRED_VERSIONS
-    for pn in pkg_pn:
+    for pn in sorted(pkg_pn):
         sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
         preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
         if preferred_versions[pn][1]:
             eligible.append(preferred_versions[pn][1])
 
     # Now add latest versions
-    for pn in sortpkg_pn:
+    for pn in sorted(sortpkg_pn):
         if pn in preferred_versions and preferred_versions[pn][1]:
             continue
         preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
@@ -289,7 +288,7 @@ def filterProviders(providers, item, cfgData, dataCache):
 
     eligible = _filterProviders(providers, item, cfgData, dataCache)
 
-    prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item, True)
+    prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item)
     if prefervar:
         dataCache.preferred[item] = prefervar
 
@@ -318,7 +317,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
     eligible = _filterProviders(providers, item, cfgData, dataCache)
 
     # First try and match any PREFERRED_RPROVIDER entry
-    prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item, True)
+    prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item)
     foundUnique = False
     if prefervar:
         for p in eligible:
@@ -345,7 +344,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
             pn = dataCache.pkg_fn[p]
             provides = dataCache.pn_provides[pn]
             for provide in provides:
-                prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide, True)
+                prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide)
                 #logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
                 if prefervar in pns and pns[prefervar] not in preferred:
                     var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)

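The sorted() calls added above exist because pkg_pn is populated in whatever order recipes happened to be parsed, so plain dict iteration made the provider debug output (and any order-sensitive tie-breaking) vary from run to run. A small illustration:

run1, run2 = {}, {}
for pn in ["zlib", "busybox", "gcc"]:    # one parse order
    run1[pn] = []
for pn in ["gcc", "zlib", "busybox"]:    # another parse order
    run2[pn] = []

print(list(run1) == list(run2))          # False: follows insertion order
print(sorted(run1) == sorted(run2))      # True: deterministic
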
+ 116 - 0
bitbake/lib/bb/remotedata.py

@@ -0,0 +1,116 @@
+"""
+BitBake 'remotedata' module
+
+Provides support for using a datastore from the bitbake client
+"""
+
+# Copyright (C) 2016  Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import bb.data
+
+class RemoteDatastores:
+    """Used on the server side to manage references to server-side datastores"""
+    def __init__(self, cooker):
+        self.cooker = cooker
+        self.datastores = {}
+        self.locked = []
+        self.nextindex = 1
+
+    def __len__(self):
+        return len(self.datastores)
+
+    def __getitem__(self, key):
+        if key is None:
+            return self.cooker.data
+        else:
+            return self.datastores[key]
+
+    def items(self):
+        return self.datastores.items()
+
+    def store(self, d, locked=False):
+        """
+        Put a datastore into the collection. If locked=True then the datastore
+        is understood to be managed externally and cannot be released by calling
+        release().
+        """
+        idx = self.nextindex
+        self.datastores[idx] = d
+        if locked:
+            self.locked.append(idx)
+        self.nextindex += 1
+        return idx
+
+    def check_store(self, d, locked=False):
+        """
+        Put a datastore into the collection if it's not already in there;
+        in either case return the index
+        """
+        for key, val in self.datastores.items():
+            if val is d:
+                idx = key
+                break
+        else:
+            idx = self.store(d, locked)
+        return idx
+
+    def release(self, idx):
+        """Discard a datastore in the collection"""
+        if idx in self.locked:
+            raise Exception('Tried to release locked datastore %d' % idx)
+        del self.datastores[idx]
+
+    def receive_datastore(self, remote_data):
+        """Receive a datastore object sent from the client (as prepared by transmit_datastore())"""
+        dct = dict(remote_data)
+        d = bb.data_smart.DataSmart()
+        d.dict = dct
+        while True:
+            if '_remote_data' in dct:
+                dsindex = dct['_remote_data']['_content']
+                del dct['_remote_data']
+                if dsindex is None:
+                    dct['_data'] = self.cooker.data.dict
+                else:
+                    dct['_data'] = self.datastores[dsindex].dict
+                break
+            elif '_data' in dct:
+                idct = dict(dct['_data'])
+                dct['_data'] = idct
+                dct = idct
+            else:
+                break
+        return d
+
+    @staticmethod
+    def transmit_datastore(d):
+        """Prepare a datastore object for sending over IPC from the client end"""
+        # FIXME content might be a dict, need to turn that into a list as well
+        def copy_dicts(dct):
+            if '_remote_data' in dct:
+                dsindex = dct['_remote_data']['_content'].dsindex
+                newdct = dct.copy()
+                newdct['_remote_data'] = {'_content': dsindex}
+                return list(newdct.items())
+            elif '_data' in dct:
+                newdct = dct.copy()
+                newdata = copy_dicts(dct['_data'])
+                if newdata:
+                    newdct['_data'] = newdata
+                return list(newdct.items())
+            return None
+        main_dict = copy_dicts(d.dict)
+        return main_dict

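RemoteDatastores above is at heart an index registry: small integer handles cross the IPC boundary while the datastore objects themselves stay on the server. A self-contained sketch of that pattern (class and variable names here are illustrative, not the bitbake API):

class Registry:
    def __init__(self):
        self.items = {}
        self.locked = set()
        self.nextindex = 1

    def store(self, obj, locked=False):
        idx = self.nextindex
        self.items[idx] = obj
        if locked:
            self.locked.add(idx)
        self.nextindex += 1
        return idx

    def check_store(self, obj):
        # Reuse the existing handle if this exact object is already stored.
        for key, val in self.items.items():
            if val is obj:
                return key
        return self.store(obj)

    def release(self, idx):
        if idx in self.locked:
            raise Exception('tried to release locked item %d' % idx)
        del self.items[idx]

r = Registry()
d = {"PN": "demo"}
idx = r.store(d)
assert r.check_store(d) == idx   # same object, same handle
r.release(idx)
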
+ 321 - 150
bitbake/lib/bb/runqueue.py

@@ -36,6 +36,7 @@ from bb import msg, data, event
 from bb import monitordisk
 import subprocess
 import pickle
+from multiprocessing import Process
 
 bblogger = logging.getLogger("BitBake")
 logger = logging.getLogger("BitBake.RunQueue")
@@ -49,30 +50,30 @@ def taskname_from_tid(tid):
     return tid.rsplit(":", 1)[1]
 
 def split_tid(tid):
+    (mc, fn, taskname, _) = split_tid_mcfn(tid)
+    return (mc, fn, taskname)
+
+def split_tid_mcfn(tid):
     if tid.startswith('multiconfig:'):
         elems = tid.split(':')
         mc = elems[1]
         fn = ":".join(elems[2:-1])
         taskname = elems[-1]
+        mcfn = "multiconfig:" + mc + ":" + fn
     else:
         tid = tid.rsplit(":", 1)
         mc = ""
         fn = tid[0]
         taskname = tid[1]
+        mcfn = fn
 
-    return (mc, fn, taskname)
+    return (mc, fn, taskname, mcfn)
 
 def build_tid(mc, fn, taskname):
     if mc:
         return "multiconfig:" + mc + ":" + fn + ":" + taskname
     return fn + ":" + taskname
 
-def taskfn_fromtid(tid):
-    (mc, fn, taskname) = split_tid(tid)
-    if mc:
-        return "multiconfig:" + mc + ":" + fn
-    return fn
-
 class RunQueueStats:
     """
     Holds statistics on the tasks handled by the associated runQueue
@@ -135,8 +136,7 @@ class RunQueueScheduler(object):
         self.buildable = []
         self.stamps = {}
         for tid in self.rqdata.runtaskentries:
-            (mc, fn, taskname) = split_tid(tid)
-            taskfn = taskfn_fromtid(tid)
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
             self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
             if tid in self.rq.runq_buildable:
                 self.buildable.append(tid)
@@ -181,9 +181,21 @@ class RunQueueScheduler(object):
         if self.rq.stats.active < self.rq.number_tasks:
             return self.next_buildable_task()
 
-    def newbuilable(self, task):
+    def newbuildable(self, task):
         self.buildable.append(task)
 
+    def describe_task(self, taskid):
+        result = 'ID %s' % taskid
+        if self.rev_prio_map:
+            result = result + (' pri %d' % self.rev_prio_map[taskid])
+        return result
+
+    def dump_prio(self, comment):
+        bb.debug(3, '%s (most important first):\n%s' %
+                 (comment,
+                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
+                             index, taskid in enumerate(self.prio_map)])))
+
 class RunQueueSchedulerSpeed(RunQueueScheduler):
     """
     A scheduler optimised for speed. The priority map is sorted by task weight,
@@ -213,35 +225,100 @@ class RunQueueSchedulerSpeed(RunQueueScheduler):
 
 class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
     """
-    A scheduler optimised to complete .bb files are quickly as possible. The
+    A scheduler optimised to complete .bb files as quickly as possible. The
     priority map is sorted by task weight, but then reordered so once a given
-    .bb file starts to build, it's completed as quickly as possible. This works
-    well where disk space is at a premium and classes like OE's rm_work are in
-    force.
+    .bb file starts to build, it's completed as quickly as possible by
+    running all tasks related to the same .bb file one after the other.
+    This works well where disk space is at a premium and classes like OE's
+    rm_work are in force.
     """
     name = "completion"
 
     def __init__(self, runqueue, rqdata):
-        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)
-
-        #FIXME - whilst this groups all fns together it does not reorder the
-        #fn groups optimally.
-
-        basemap = copy.deepcopy(self.prio_map)
-        self.prio_map = []
-        while (len(basemap) > 0):
-            entry = basemap.pop(0)
-            self.prio_map.append(entry)
-            fn = fn_from_tid(entry)
-            todel = []
-            for entry in basemap:
-                entry_fn = fn_from_tid(entry)
-                if entry_fn == fn:
-                    todel.append(basemap.index(entry))
-                    self.prio_map.append(entry)
-            todel.reverse()
-            for idx in todel:
-                del basemap[idx]
+        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)
+
+        # Extract list of tasks for each recipe, with tasks sorted
+        # ascending from "must run first" (typically do_fetch) to
+        # "runs last" (do_build). The speed scheduler prioritizes
+        # tasks that must run first before the ones that run later;
+        # this is what we depend on here.
+        task_lists = {}
+        for taskid in self.prio_map:
+            fn, taskname = taskid.rsplit(':', 1)
+            task_lists.setdefault(fn, []).append(taskname)
+
+        # Now unify the different task lists. The strategy is that
+        # common tasks get skipped and new ones get inserted after the
+        # preceding common one(s) as they are found. Because task
+        # lists should differ only by their number of tasks, but not
+        # the ordering of the common tasks, this should result in a
+        # deterministic result that is a superset of the individual
+        # task ordering.
+        all_tasks = []
+        for recipe, new_tasks in task_lists.items():
+            index = 0
+            old_task = all_tasks[index] if index < len(all_tasks) else None
+            for new_task in new_tasks:
+                if old_task == new_task:
+                    # Common task, skip it. This is the fast-path which
+                    # avoids a full search.
+                    index += 1
+                    old_task = all_tasks[index] if index < len(all_tasks) else None
+                else:
+                    try:
+                        index = all_tasks.index(new_task)
+                        # Already present, just not at the current
+                        # place. We re-synchronize by changing the
+                        # index so that it matches again. Now
+                        # move on to the next existing task.
+                        index += 1
+                        old_task = all_tasks[index] if index < len(all_tasks) else None
+                    except ValueError:
+                        # Not present. Insert before old_task, which
+                        # remains the same (but gets shifted back).
+                        all_tasks.insert(index, new_task)
+                        index += 1
+        bb.debug(3, 'merged task list: %s' % all_tasks)
+
+        # Now reverse the order so that tasks that finish the work on one
+        # recipe are considered more important (= come first). The ordering
+        # is now so that do_build is most important.
+        all_tasks.reverse()
+
+        # Group tasks of the same kind before tasks of less important
+        # kinds at the head of the queue (because earlier = lower
+        # priority number = runs earlier), while preserving the
+        # ordering by recipe. If recipe foo is more important than
+        # bar, then the goal is to work on foo's do_populate_sysroot
+        # before bar's do_populate_sysroot and on the more important
+        # tasks of foo before any of the less important tasks in any
+        # other recipe (even if those other recipes are more important
+        # than foo).
+        #
+        # All of this only applies when tasks are runnable. Explicit
+        # dependencies still override this ordering by priority.
+        #
+        # Here's an example why this priority re-ordering helps with
+        # minimizing disk usage. Consider a recipe foo with a higher
+        # priority than bar where foo DEPENDS on bar. Then the
+        # implicit rule (from base.bbclass) is that foo's do_configure
+        # depends on bar's do_populate_sysroot. This ensures that
+        # bar's do_populate_sysroot gets done first. Normally the
+        # tasks from foo would continue to run once that is done, and
+        # bar only gets completed and cleaned up later. By ordering
+        # bar's tasks that depend on bar's do_populate_sysroot before foo's
+        # do_configure, that problem gets avoided.
+        task_index = 0
+        self.dump_prio('original priorities')
+        for task in all_tasks:
+            for index in range(task_index, self.numTasks):
+                taskid = self.prio_map[index]
+                taskname = taskid.rsplit(':', 1)[1]
+                if taskname == task:
+                    del self.prio_map[index]
+                    self.prio_map.insert(task_index, taskid)
+                    task_index += 1
+        self.dump_prio('completion priorities')
 
 class RunTaskEntry(object):
     def __init__(self):
@@ -263,9 +340,11 @@ class RunQueueData:
         self.rq = rq
         self.warn_multi_bb = False
 
-        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST", True) or ""
-        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
+        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
         self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
+        self.setscenewhitelist_checked = False
+        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
         self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()
 
         self.reset()
@@ -289,8 +368,8 @@ class RunQueueData:
         return tid + task_name_suffix
 
     def get_short_user_idstring(self, task, task_name_suffix = ""):
-        (mc, fn, taskname) = split_tid(task)
-        pn = self.dataCaches[mc].pkg_fn[fn]
+        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
+        pn = self.dataCaches[mc].pkg_fn[taskfn]
         taskname = taskname_from_tid(task) + task_name_suffix
         return "%s:%s" % (pn, taskname)
 
@@ -511,9 +590,8 @@ class RunQueueData:
         for mc in taskData:
             for tid in taskData[mc].taskentries:
 
-                (mc, fn, taskname) = split_tid(tid)
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                 #runtid = build_tid(mc, fn, taskname)
-                taskfn = taskfn_fromtid(tid)
 
                 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
 
@@ -529,7 +607,7 @@ class RunQueueData:
                 #
                 # e.g. addtask before X after Y
                 for t in taskData[mc].taskentries[tid].tdepends:
-                    (_, depfn, deptaskname) = split_tid(t)
+                    (_, depfn, deptaskname, _) = split_tid_mcfn(t)
                     depends.add(build_tid(mc, depfn, deptaskname))
 
                 # Resolve 'deptask' dependencies
@@ -566,6 +644,8 @@ class RunQueueData:
                 for (depname, idependtask) in irdepends:
                     if depname in taskData[mc].run_targets:
                         # Won't be in run_targets if ASSUME_PROVIDED
+                        if not taskData[mc].run_targets[depname]:
+                            continue
                         depdata = taskData[mc].run_targets[depname][0]
                         if depdata is not None:
                             t = depdata + ":" + idependtask
@@ -611,12 +691,15 @@ class RunQueueData:
 
             def generate_recdeps(t):
                 newdeps = set()
-                (mc, fn, taskname) = split_tid(t)
+                (mc, fn, taskname, _) = split_tid_mcfn(t)
                 add_resolved_dependencies(mc, fn, tasknames, newdeps)
                 extradeps[tid].update(newdeps)
                 seendeps.add(t)
                 newdeps.add(t)
                 for i in newdeps:
+                    if i not in self.runtaskentries:
+                        # Not all recipes might have the recrdeptask task as a task
+                        continue
                     task = self.runtaskentries[i].task
                     for n in self.runtaskentries[i].depends:
                         if n not in seendeps:
@@ -723,6 +806,23 @@ class RunQueueData:
 
         self.init_progress_reporter.next_stage()
 
+        if self.cooker.configuration.runall is not None:
+            runall = "do_%s" % self.cooker.configuration.runall
+            runall_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == runall }
+
+            # re-run mark_active and then drop unused tasks from the new list
+            runq_build = {}
+            for tid in list(runall_tids):
+                mark_active(tid,1)
+
+            for tid in list(self.runtaskentries.keys()):
+                if tid not in runq_build:
+                    del self.runtaskentries[tid]
+                    delcount += 1
+
+            if len(self.runtaskentries) == 0:
+                bb.msg.fatal("RunQueue", "No remaining tasks to run for build target %s with runall %s" % (target, runall))
+
         #
         # Step D - Sanity checks and computation
         #
@@ -774,8 +874,7 @@ class RunQueueData:
             prov_list = {}
             seen_fn = []
             for tid in self.runtaskentries:
-                (tidmc, fn, taskname) = split_tid(tid)
-                taskfn = taskfn_fromtid(tid)
+                (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                 if taskfn in seen_fn:
                     continue
                 if mc != tidmc:
@@ -885,15 +984,15 @@ class RunQueueData:
         self.runq_setscene_tids = []
         if not self.cooker.configuration.nosetscene:
             for tid in self.runtaskentries:
-                (mc, fn, taskname) = split_tid(tid)
-                setscenetid = fn + ":" + taskname + "_setscene"
+                (mc, fn, taskname, _) = split_tid_mcfn(tid)
+                setscenetid = tid + "_setscene"
                 if setscenetid not in taskData[mc].taskentries:
                     continue
                 self.runq_setscene_tids.append(tid)
 
         def invalidate_task(tid, error_nostamp):
-            (mc, fn, taskname) = split_tid(tid)
-            taskdep = self.dataCaches[mc].task_deps[fn]
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+            taskdep = self.dataCaches[mc].task_deps[taskfn]
             if fn + ":" + taskname not in taskData[mc].taskentries:
                 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
             if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
@@ -946,8 +1045,7 @@ class RunQueueData:
                     procdep = []
                     for dep in self.runtaskentries[tid].depends:
                         procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
-                    (mc, fn, taskname) = split_tid(tid)
-                    taskfn = taskfn_fromtid(tid)
+                    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                     self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
                     task = self.runtaskentries[tid].task
 
@@ -979,16 +1077,22 @@ class RunQueue:
         self.cfgData = cfgData
         self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
 
-        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
-        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
-        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2", True) or None
-        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID", True) or None
+        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
+        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
+        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
+        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
 
         self.state = runQueuePrepare
 
         # For disk space monitor
+        # Invoked at regular time intervals via the bitbake heartbeat event
+        # while the build is running. We generate a unique name for the handler
+        # here, just in case there is ever more than one RunQueue instance,
+        # start the handler when reaching runQueueSceneRun, and stop it when
+        # done with the build.
         self.dm = monitordisk.diskMonitor(cfgData)
-
+        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
+        self.dm_event_handler_registered = False
         self.rqexe = None
         self.worker = {}
         self.fakeworker = {}
@@ -1000,8 +1104,9 @@ class RunQueue:
             magic = "decafbadbad"
         if fakeroot:
             magic = magic + "beef"
-            fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True)
-            fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split()
+            mcdata = self.cooker.databuilder.mcdata[mc]
+            fakerootcmd = mcdata.getVar("FAKEROOTCMD")
+            fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
             env = os.environ.copy()
             for key, value in (var.split('=') for var in fakerootenv):
                 env[key] = value
@@ -1027,12 +1132,13 @@ class RunQueue:
             "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
             "logdefaultdomain" : bb.msg.loggerDefaultDomains,
             "prhost" : self.cooker.prhost,
-            "buildname" : self.cfgData.getVar("BUILDNAME", True),
-            "date" : self.cfgData.getVar("DATE", True),
-            "time" : self.cfgData.getVar("TIME", True),
+            "buildname" : self.cfgData.getVar("BUILDNAME"),
+            "date" : self.cfgData.getVar("DATE"),
+            "time" : self.cfgData.getVar("TIME"),
         }
 
         worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
+        worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
         worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
         worker.stdin.flush()
 
@@ -1062,10 +1168,9 @@ class RunQueue:
         for mc in self.rqdata.dataCaches:
             self.worker[mc] = self._start_worker(mc)
 
-    def start_fakeworker(self, rqexec):
-        if not self.fakeworker:
-            for mc in self.rqdata.dataCaches:
-                self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
+    def start_fakeworker(self, rqexec, mc):
+        if mc not in self.fakeworker:
+            self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
 
     def teardown_workers(self):
         self.teardown = True
@@ -1099,8 +1204,7 @@ class RunQueue:
             except:
                 return None
 
-        (mc, fn, tn) = split_tid(tid)
-        taskfn = taskfn_fromtid(tid)
+        (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
         if taskname is None:
             taskname = tn
 
@@ -1134,8 +1238,7 @@ class RunQueue:
         t1 = get_timestamp(stampfile)
         for dep in self.rqdata.runtaskentries[tid].depends:
             if iscurrent:
-                (mc2, fn2, taskname2) = split_tid(dep)
-                taskfn2 = taskfn_fromtid(dep)
+                (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
                 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
                 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
                 t2 = get_timestamp(stampfile2)
@@ -1213,10 +1316,12 @@ class RunQueue:
                 self.rqdata.init_progress_reporter.next_stage()
                 self.rqexe = RunQueueExecuteScenequeue(self)
 
-        if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp]:
-            self.dm.check(self)
-
         if self.state is runQueueSceneRun:
+            if not self.dm_event_handler_registered:
+                res = bb.event.register(self.dm_event_handler_name,
+                                        lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
+                                        ('bb.event.HeartbeatEvent',))
+                self.dm_event_handler_registered = True
             retval = self.rqexe.execute()
 
         if self.state is runQueueRunInit:
@@ -1235,7 +1340,13 @@ class RunQueue:
         if self.state is runQueueCleanUp:
             retval = self.rqexe.finish()
 
-        if (self.state is runQueueComplete or self.state is runQueueFailed) and self.rqexe:
+        build_done = self.state is runQueueComplete or self.state is runQueueFailed
+
+        if build_done and self.dm_event_handler_registered:
+            bb.event.remove(self.dm_event_handler_name, None)
+            self.dm_event_handler_registered = False
+
+        if build_done and self.rqexe:
             self.teardown_workers()
             if self.rqexe.stats.failed:
                 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
@@ -1244,12 +1355,7 @@ class RunQueue:
                 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
 
         if self.state is runQueueFailed:
-            if not self.rqdata.taskData[''].tryaltconfigs:
-                raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
-            for tid in self.rqexe.failed_tids:
-                (mc, fn, tn) = split_tid(tid)
-                self.rqdata.taskData[mc].fail_fn(fn)
-            self.rqdata.reset()
+            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
 
         if self.state is runQueueComplete:
             # All done
@@ -1292,15 +1398,36 @@ class RunQueue:
         else:
             self.rqexe.finish()
 
+    def rq_dump_sigfn(self, fn, options):
+        bb_cache = bb.cache.NoCache(self.cooker.databuilder)
+        the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
+        siggen = bb.parse.siggen
+        dataCaches = self.rqdata.dataCaches
+        siggen.dump_sigfn(fn, dataCaches, options)
+
     def dump_signatures(self, options):
-        done = set()
+        fns = set()
         bb.note("Reparsing files to collect dependency data")
-        bb_cache = bb.cache.NoCache(self.cooker.databuilder)
+
         for tid in self.rqdata.runtaskentries:
-            fn = taskfn_fromtid(tid)
-            if fn not in done:
-                the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
-                done.add(fn)
+            fn = fn_from_tid(tid)
+            fns.add(fn)
+
+        max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
+        # We cannot use the real multiprocessing.Pool easily due to some local data
+        # that can't be pickled. This is a cheap multi-process solution.
+        launched = []
+        while fns:
+            if len(launched) < max_process:
+                p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
+                p.start()
+                launched.append(p)
+            for q in launched:
+                # The finished processes are joined when calling is_alive()
+                if not q.is_alive():
+                    launched.remove(q)
+        for p in launched:
+            p.join()
 
         bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
 
@@ -1319,8 +1446,7 @@ class RunQueue:
         valid_new = set()
 
         for tid in self.rqdata.runtaskentries:
-            (mc, fn, taskname) = split_tid(tid)
-            taskfn = taskfn_fromtid(tid)
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
             taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
 
             if 'noexec' in taskdep and taskname in taskdep['noexec']:
@@ -1328,11 +1454,11 @@ class RunQueue:
                 continue
 
             sq_fn.append(fn)
-            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
+            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
             sq_hash.append(self.rqdata.runtaskentries[tid].hash)
             sq_taskname.append(taskname)
             sq_task.append(tid)
-        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
+        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
         try:
             call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
             valid = bb.utils.better_eval(call, locs)
@@ -1408,8 +1534,8 @@ class RunQueue:
 
 
         for tid in invalidtasks:
-            (mc, fn, taskname) = split_tid(tid)
-            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
             h = self.rqdata.runtaskentries[tid].hash
             matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
             match = None
@@ -1433,8 +1559,8 @@ class RunQueueExecute:
         self.cfgData = rq.cfgData
         self.rqdata = rq.rqdata
 
-        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
-        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"
+        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
+        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
 
         self.runq_buildable = set()
         self.runq_running = set()
@@ -1512,11 +1638,11 @@ class RunQueueExecute:
         taskdata = {}
         taskdeps.add(task)
         for dep in taskdeps:
-            (mc, fn, taskname) = split_tid(dep)
-            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
+            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
             taskdata[dep] = [pn, taskname, fn]
         call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
-        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data }
+        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
         valid = bb.utils.better_eval(call, locs)
         return valid
 
@@ -1569,8 +1695,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
             tasknames = {}
             fns = {}
             for tid in self.rqdata.runtaskentries:
-                (mc, fn, taskname) = split_tid(tid)
-                taskfn = taskfn_fromtid(tid)
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                 fns[tid] = taskfn
                 tasknames[tid] = taskname
@@ -1585,13 +1710,12 @@ class RunQueueExecuteTasks(RunQueueExecute):
                 invalidtasks.append(tid)
 
             call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
-            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.expanded_data, "invalidtasks" : invalidtasks }
+            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
             covered_remove = bb.utils.better_eval(call, locs)
 
         def removecoveredtask(tid):
-            (mc, fn, taskname) = split_tid(tid)
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
             taskname = taskname + '_setscene'
-            taskfn = taskfn_fromtid(tid)
             bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
             self.rq.scenequeue_covered.remove(tid)
 
@@ -1617,7 +1741,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
         for mc in self.rqdata.dataCaches:
             target_pairs = []
             for tid in self.rqdata.target_tids:
-                (tidmc, fn, taskname) = split_tid(tid)
+                (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
                 if tidmc == mc:
                     target_pairs.append((fn, taskname))
 
@@ -1638,7 +1762,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
                              if type(obj) is type and
                                 issubclass(obj, RunQueueScheduler))
 
-        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
+        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
         if user_schedulers:
             for sched in user_schedulers.split():
                 if not "." in sched:
@@ -1657,7 +1781,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
 
     def setbuildable(self, task):
         self.runq_buildable.add(task)
-        self.sched.newbuilable(task)
+        self.sched.newbuildable(task)
 
     def task_completeoutright(self, task):
         """
@@ -1710,10 +1834,12 @@ class RunQueueExecuteTasks(RunQueueExecute):
         Run the tasks in a queue prepared by rqdata.prepare()
         """
 
-        if self.rqdata.setscenewhitelist:
+        if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
+            self.rqdata.setscenewhitelist_checked = True
+
             # Check tasks that are going to run against the whitelist
             def check_norun_task(tid, showerror=False):
-                (mc, fn, taskname) = split_tid(tid)
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                 # Ignore covered tasks
                 if tid in self.rq.scenequeue_covered:
                     return False
@@ -1721,11 +1847,11 @@ class RunQueueExecuteTasks(RunQueueExecute):
                 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
                     return False
                 # Ignore noexec tasks
-                taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
+                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                 if 'noexec' in taskdep and taskname in taskdep['noexec']:
                     return False
 
-                pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
+                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                     if showerror:
                         if tid in self.rqdata.runq_setscene_tids:
@@ -1761,8 +1887,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
 
         task = self.sched.next()
         if task is not None:
-            (mc, fn, taskname) = split_tid(task)
-            taskfn = taskfn_fromtid(task)
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
 
             if task in self.rq.scenequeue_covered:
                 logger.debug(2, "Setscene covered task %s", task)
@@ -1782,7 +1907,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
                 bb.event.fire(startevent, self.cfgData)
                 self.runq_running.add(task)
                 self.stats.taskActive()
-                if not self.cooker.configuration.dry_run:
+                if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                     bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                 self.task_complete(task)
                 return True
@@ -1793,19 +1918,19 @@ class RunQueueExecuteTasks(RunQueueExecute):
             taskdepdata = self.build_taskdepdata(task)
 
             taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
-            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
-                if not self.rq.fakeworker:
+            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
+                if mc not in self.rq.fakeworker:
                     try:
-                        self.rq.start_fakeworker(self)
+                        self.rq.start_fakeworker(self, mc)
                     except OSError as exc:
                         logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                         self.rq.state = runQueueFailed
                         self.stats.taskFailed()
                         return True
-                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>")
+                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                 self.rq.fakeworker[mc].process.stdin.flush()
             else:
-                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
+                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                 self.rq.worker[mc].process.stdin.flush()
 
             self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
@@ -1842,12 +1967,12 @@ class RunQueueExecuteTasks(RunQueueExecute):
         while next:
             additional = []
             for revdep in next:
-                (mc, fn, taskname) = split_tid(revdep)
-                taskfn = taskfn_fromtid(revdep)
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                 deps = self.rqdata.runtaskentries[revdep].depends
                 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
-                taskdepdata[revdep] = [pn, taskname, fn, deps, provides]
+                taskhash = self.rqdata.runtaskentries[revdep].hash
+                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
                 for revdep2 in deps:
                     if revdep2 not in taskdepdata:
                         additional.append(revdep2)
@@ -1876,6 +2001,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
         sq_revdeps_new = {}
         sq_revdeps_squash = {}
         self.sq_harddeps = {}
+        self.stamps = {}
 
         # We need to construct a dependency graph for the setscene functions. Intermediate
         # dependencies between the setscene tasks only complicate the code. This code
@@ -1899,6 +2025,8 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
         for tid in self.rqdata.runq_setscene_tids:
             #bb.warn("Added endpoint 2 %s" % (tid))
             for dep in self.rqdata.runtaskentries[tid].depends:
+                    if tid in sq_revdeps[dep]:
+                        sq_revdeps[dep].remove(tid)
                     if dep not in endpoints:
                         endpoints[dep] = set()
                     #bb.warn("  Added endpoint 3 %s" % (dep))
@@ -1918,12 +2046,13 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
                 if point in self.rqdata.runq_setscene_tids:
                     sq_revdeps_new[point] = tasks
                     tasks = set()
+                    continue
                 for dep in self.rqdata.runtaskentries[point].depends:
                     if point in sq_revdeps[dep]:
                         sq_revdeps[dep].remove(point)
                     if tasks:
                         sq_revdeps_new[dep] |= tasks
-                    if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
+                    if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
                         newendpoints[dep] = task
             if len(newendpoints) != 0:
                 process_endpoints(newendpoints)
@@ -1986,9 +2115,10 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
         # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
         # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
         for tid in self.rqdata.runq_setscene_tids:
-                (mc, fn, taskname) = split_tid(tid)
-                realtid = fn + ":" + taskname + "_setscene"
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+                realtid = tid + "_setscene"
                 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
+                self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
                 for (depname, idependtask) in idepends:
 
                     if depname not in self.rqdata.taskData[mc].build_targets:
@@ -2050,10 +2180,9 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
             noexec = []
             stamppresent = []
             for tid in self.sq_revdeps:
-                (mc, fn, taskname) = split_tid(tid)
-                taskfn = taskfn_fromtid(tid)
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
 
-                taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
+                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
 
                 if 'noexec' in taskdep and taskname in taskdep['noexec']:
                     noexec.append(tid)
@@ -2074,12 +2203,12 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
                     continue
 
                 sq_fn.append(fn)
-                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
+                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                 sq_taskname.append(taskname)
                 sq_task.append(tid)
             call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
-            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
+            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
             valid = bb.utils.better_eval(call, locs)
 
             valid_new = stamppresent
@@ -2120,10 +2249,10 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
         self.scenequeue_updatecounters(task)
 
     def check_taskfail(self, task):
-        if self.rqdata.setscenewhitelist:
+        if self.rqdata.setscenewhitelist is not None:
             realtask = task.split('_setscene')[0]
-            (mc, fn, taskname) = split_tid(realtask)
-            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
+            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
             if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                 self.rq.state = runQueueCleanUp
@@ -2166,7 +2295,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
         if self.stats.active < self.number_tasks:
             # Find the next setscene to run
             for nexttask in self.rqdata.runq_setscene_tids:
-                if nexttask in self.runq_buildable and nexttask not in self.runq_running:
+                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                     if nexttask in self.unskippable:
                         logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                     if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
@@ -2186,8 +2315,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
                     task = nexttask
                     break
         if task is not None:
-            (mc, fn, taskname) = split_tid(task)
-            taskfn = taskfn_fromtid(task)
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
             taskname = taskname + "_setscene"
             if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                 logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
@@ -2207,16 +2335,20 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
             startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
             bb.event.fire(startevent, self.cfgData)
 
+            taskdepdata = self.build_taskdepdata(task)
+
             taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
             if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
-                if not self.rq.fakeworker:
-                    self.rq.start_fakeworker(self)
-                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
+                if mc not in self.rq.fakeworker:
+                    self.rq.start_fakeworker(self, mc)
+                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                 self.rq.fakeworker[mc].process.stdin.flush()
             else:
-                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
+                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                 self.rq.worker[mc].process.stdin.flush()
 
+            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
+            self.build_stamps2.append(self.build_stamps[task])
             self.runq_running.add(task)
             self.stats.taskActive()
             if self.stats.active < self.number_tasks:
@@ -2235,7 +2367,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
         self.rq.scenequeue_covered = self.scenequeue_covered
         self.rq.scenequeue_notcovered = self.scenequeue_notcovered
 
-        logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))
+        logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))
 
         self.rq.state = runQueueRunInit
 
@@ -2247,6 +2379,44 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
     def runqueue_process_waitpid(self, task, status):
         RunQueueExecute.runqueue_process_waitpid(self, task, status)
 
+
+    def build_taskdepdata(self, task):
+        def getsetscenedeps(tid):
+            deps = set()
+            (mc, fn, taskname, _) = split_tid_mcfn(tid)
+            realtid = tid + "_setscene"
+            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
+            for (depname, idependtask) in idepends:
+                if depname not in self.rqdata.taskData[mc].build_targets:
+                    continue
+
+                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
+                if depfn is None:
+                    continue
+                deptid = depfn + ":" + idependtask.replace("_setscene", "")
+                deps.add(deptid)
+            return deps
+
+        taskdepdata = {}
+        next = getsetscenedeps(task)
+        next.add(task)
+        while next:
+            additional = []
+            for revdep in next:
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
+                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
+                deps = getsetscenedeps(revdep)
+                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
+                taskhash = self.rqdata.runtaskentries[revdep].hash
+                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
+                for revdep2 in deps:
+                    if revdep2 not in taskdepdata:
+                        additional.append(revdep2)
+            next = additional
+
+        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
+        return taskdepdata
+
 class TaskFailure(Exception):
     """
     Exception raised when a task in a runqueue fails
@@ -2313,6 +2483,9 @@ class runQueueTaskFailed(runQueueEvent):
         runQueueEvent.__init__(self, task, stats, rq)
         self.exitcode = exitcode
 
+    def __str__(self):
+        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
+
 class sceneQueueTaskFailed(sceneQueueEvent):
     """
     Event notifying a setscene task failed
@@ -2321,6 +2494,9 @@ class sceneQueueTaskFailed(sceneQueueEvent):
         sceneQueueEvent.__init__(self, task, stats, rq)
         self.exitcode = exitcode
 
+    def __str__(self):
+        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
+
 class sceneQueueComplete(sceneQueueEvent):
     """
     Event when all the sceneQueue tasks are complete
@@ -2365,16 +2541,11 @@ class runQueuePipe():
         self.rqexec = rqexec
 
     def read(self):
-        for w in [self.rq.worker, self.rq.fakeworker]:
-            for mc in w:
-                w[mc].process.poll()
-                if w[mc].process.returncode is not None and not self.rq.teardown:
-                    name = None
-                    if w in self.rq.worker:
-                        name = "Worker"
-                    elif w in self.rq.fakeworker:
-                        name = "Fakeroot"
-                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, w.pid, str(w.returncode)))
+        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
+            for worker in workers.values():
+                worker.process.poll()
+                if worker.process.returncode is not None and not self.rq.teardown:
+                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                     self.rq.finish_runqueue(True)
 
         start = len(self.queue)
@@ -2417,9 +2588,9 @@ class runQueuePipe():
         self.input.close()
 
 def get_setscene_enforce_whitelist(d):
-    if d.getVar('BB_SETSCENE_ENFORCE', True) != '1':
+    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
         return None
-    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST", True) or "").split()
+    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
     outlist = []
     for item in whitelist[:]:
         if item.startswith('%:'):
@@ -2432,7 +2603,7 @@ def get_setscene_enforce_whitelist(d):
 
 def check_setscene_enforce_whitelist(pn, taskname, whitelist):
     import fnmatch
-    if whitelist:
+    if whitelist is not None:
         item = '%s:%s' % (pn, taskname)
         for whitelist_item in whitelist:
             if fnmatch.fnmatch(item, whitelist_item):

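The two whitelist helpers in the hunk above build an fnmatch pattern list from BB_SETSCENE_ENFORCE_WHITELIST (only when BB_SETSCENE_ENFORCE is '1') and match a "pn:taskname" string against it. A minimal sketch of the matching step, with purely illustrative whitelist entries:

import fnmatch

# Illustrative pattern list of the kind get_setscene_enforce_whitelist()
# returns; each entry is an fnmatch pattern tested against "pn:taskname".
whitelist = ["*:do_fetch", "busybox:do_compile"]

def allowed(pn, taskname):
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)

print(allowed("busybox", "do_fetch"))    # True, matches "*:do_fetch"
print(allowed("busybox", "do_install"))  # False
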
+ 0 - 78
bitbake/lib/bb/server/__init__.py

@@ -18,82 +18,4 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-""" Base code for Bitbake server process
 
-Have a common base for that all Bitbake server classes ensures a consistent
-approach to the interface, and minimize risks associated with code duplication.
-
-"""
-
-"""  BaseImplServer() the base class for all XXServer() implementations.
-
-    These classes contain the actual code that runs the server side, i.e.
-    listens for the commands and executes them. Although these implementations
-    contain all the data of the original bitbake command, i.e the cooker instance,
-    they may well run on a different process or even machine.
-
-"""
-
-class BaseImplServer():
-    def __init__(self):
-        self._idlefuns = {}
-
-    def addcooker(self, cooker):
-        self.cooker = cooker
-
-    def register_idle_function(self, function, data):
-        """Register a function to be called while the server is idle"""
-        assert hasattr(function, '__call__')
-        self._idlefuns[function] = data
-
-
-
-""" BitBakeBaseServerConnection class is the common ancestor to all
-    BitBakeServerConnection classes.
-
-    These classes control the remote server. The only command currently
-    implemented is the terminate() command.
-
-"""
-
-class BitBakeBaseServerConnection():
-    def __init__(self, serverImpl):
-        pass
-
-    def terminate(self):
-        pass
-
-    def setupEventQueue(self):
-        pass
-
-
-""" BitBakeBaseServer class is the common ancestor to all Bitbake servers
-
-    Derive this class in order to implement a BitBakeServer which is the
-    controlling stub for the actual server implementation
-
-"""
-class BitBakeBaseServer(object):
-    def initServer(self):
-        self.serverImpl = None  # we ensure a runtime crash if not overloaded
-        self.connection = None
-        return
-
-    def addcooker(self, cooker):
-        self.cooker = cooker
-        self.serverImpl.addcooker(cooker)
-
-    def getServerIdleCB(self):
-        return self.serverImpl.register_idle_function
-
-    def saveConnectionDetails(self):
-        return
-
-    def detach(self):
-        return
-
-    def establishConnection(self, featureset):
-        raise   "Must redefine the %s.establishConnection()" % self.__class__.__name__
-
-    def endSession(self):
-        self.connection.terminate()

+ 526 - 172
bitbake/lib/bb/server/process.py

@@ -22,115 +22,245 @@
 
 import bb
 import bb.event
-import itertools
 import logging
 import multiprocessing
+import threading
+import array
 import os
-import signal
 import sys
 import time
 import select
-from queue import Empty
-from multiprocessing import Event, Process, util, Queue, Pipe, queues, Manager
-
-from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
+import socket
+import subprocess
+import errno
+import re
+import datetime
+import bb.server.xmlrpcserver
+from bb import daemonize
+from multiprocessing import queues
 
 logger = logging.getLogger('BitBake')
 
-class ServerCommunicator():
-    def __init__(self, connection, event_handle, server):
-        self.connection = connection
-        self.event_handle = event_handle
-        self.server = server
+class ProcessTimeout(SystemExit):
+    pass
 
-    def runCommand(self, command):
-        # @todo try/except
-        self.connection.send(command)
+class ProcessServer(multiprocessing.Process):
+    profile_filename = "profile.log"
+    profile_processed_filename = "profile.log.processed"
 
-        if not self.server.is_alive():
-            raise SystemExit
+    def __init__(self, lock, sock, sockname):
+        multiprocessing.Process.__init__(self)
+        self.command_channel = False
+        self.command_channel_reply = False
+        self.quit = False
+        self.heartbeat_seconds = 1 # default, BB_HEARTBEAT_EVENT will be checked once we have a datastore.
+        self.next_heartbeat = time.time()
 
-        while True:
-            # don't let the user ctrl-c while we're waiting for a response
-            try:
-                for idx in range(0,4): # 0, 1, 2, 3
-                    if self.connection.poll(5):
-                        return self.connection.recv()
-                    else:
-                        bb.warn("Timeout while attempting to communicate with bitbake server")
-                bb.fatal("Gave up; Too many tries: timeout while attempting to communicate with bitbake server")
-            except KeyboardInterrupt:
-                pass
+        self.event_handle = None
+        self.haveui = False
+        self.lastui = False
+        self.xmlrpc = False
 
-    def getEventHandle(self):
-        return self.event_handle.value
+        self._idlefuns = {}
 
-class EventAdapter():
-    """
-    Adapter to wrap our event queue since the caller (bb.event) expects to
-    call a send() method, but our actual queue only has put()
-    """
-    def __init__(self, queue):
-        self.queue = queue
+        self.bitbake_lock = lock
+        self.sock = sock
+        self.sockname = sockname
 
-    def send(self, event):
+    def register_idle_function(self, function, data):
+        """Register a function to be called while the server is idle"""
+        assert hasattr(function, '__call__')
+        self._idlefuns[function] = data
+
+    def run(self):
+
+        if self.xmlrpcinterface[0]:
+            self.xmlrpc = bb.server.xmlrpcserver.BitBakeXMLRPCServer(self.xmlrpcinterface, self.cooker, self)
+
+            print("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port))
+
+        heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT')
+        if heartbeat_event:
+            try:
+                self.heartbeat_seconds = float(heartbeat_event)
+            except:
+                bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event)
+
+        self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT')
         try:
-            self.queue.put(event)
-        except Exception as err:
-            print("EventAdapter puked: %s" % str(err))
+            if self.timeout:
+                self.timeout = float(self.timeout)
+        except:
+            bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout)
 
 
-class ProcessServer(Process, BaseImplServer):
-    profile_filename = "profile.log"
-    profile_processed_filename = "profile.log.processed"
+        try:
+            self.bitbake_lock.seek(0)
+            self.bitbake_lock.truncate()
+            if self.xmlrpc:
+                self.bitbake_lock.write("%s %s:%s\n" % (os.getpid(), self.xmlrpc.host, self.xmlrpc.port))
+            else:
+                self.bitbake_lock.write("%s\n" % (os.getpid()))
+            self.bitbake_lock.flush()
+        except Exception as e:
+            print("Error writing to lock file: %s" % str(e))
+            pass
+
+        if self.cooker.configuration.profile:
+            try:
+                import cProfile as profile
+            except:
+                import profile
+            prof = profile.Profile()
 
-    def __init__(self, command_channel, event_queue, featurelist):
-        BaseImplServer.__init__(self)
-        Process.__init__(self)
-        self.command_channel = command_channel
-        self.event_queue = event_queue
-        self.event = EventAdapter(event_queue)
-        self.featurelist = featurelist
-        self.quit = False
+            ret = profile.Profile.runcall(prof, self.main)
 
-        self.quitin, self.quitout = Pipe()
-        self.event_handle = multiprocessing.Value("i")
+            prof.dump_stats("profile.log")
+            bb.utils.process_profilelog("profile.log")
+            print("Raw profiling information saved to profile.log and processed statistics to profile.log.processed")
 
-    def run(self):
-        for event in bb.event.ui_queue:
-            self.event_queue.put(event)
-        self.event_handle.value = bb.event.register_UIHhandler(self, True)
+        else:
+            ret = self.main()
 
-        bb.cooker.server_main(self.cooker, self.main)
+        return ret
 
     def main(self):
-        # Ignore SIGINT within the server, as all SIGINT handling is done by
-        # the UI and communicated to us
-        self.quitin.close()
-        signal.signal(signal.SIGINT, signal.SIG_IGN)
+        self.cooker.pre_serve()
+
         bb.utils.set_process_name("Cooker")
+
+        ready = []
+
+        self.controllersock = False
+        fds = [self.sock]
+        if self.xmlrpc:
+            fds.append(self.xmlrpc)
+        print("Entering server connection loop")
+
+        def disconnect_client(self, fds):
+            if not self.haveui:
+                return
+            print("Disconnecting Client")
+            fds.remove(self.controllersock)
+            fds.remove(self.command_channel)
+            bb.event.unregister_UIHhandler(self.event_handle, True)
+            self.command_channel_reply.writer.close()
+            self.event_writer.writer.close()
+            del self.event_writer
+            self.controllersock.close()
+            self.controllersock = False
+            self.haveui = False
+            self.lastui = time.time()
+            self.cooker.clientComplete()
+            if self.timeout is None:
+                print("No timeout, exiting.")
+                self.quit = True
+
         while not self.quit:
-            try:
-                if self.command_channel.poll():
-                    command = self.command_channel.recv()
-                    self.runCommand(command)
-                if self.quitout.poll():
-                    self.quitout.recv()
-                    self.quit = True
-                    try:
-                        self.runCommand(["stateForceShutdown"])
-                    except:
-                        pass
+            if self.sock in ready:
+                self.controllersock, address = self.sock.accept()
+                if self.haveui:
+                    print("Dropping connection attempt as we have a UI %s" % (str(ready)))
+                    self.controllersock.close()
+                else:
+                    print("Accepting %s" % (str(ready)))
+                    fds.append(self.controllersock)
+            if self.controllersock in ready:
+                try:
+                    print("Connecting Client")
+                    ui_fds = recvfds(self.controllersock, 3)
+
+                    # Where to write events to
+                    writer = ConnectionWriter(ui_fds[0])
+                    self.event_handle = bb.event.register_UIHhandler(writer, True)
+                    self.event_writer = writer
 
-                self.idle_commands(.1, [self.command_channel, self.quitout])
-            except Exception:
-                logger.exception('Running command %s', command)
+                    # Where to read commands from
+                    reader = ConnectionReader(ui_fds[1])
+                    fds.append(reader)
+                    self.command_channel = reader
 
-        self.event_queue.close()
-        bb.event.unregister_UIHhandler(self.event_handle.value)
-        self.command_channel.close()
-        self.cooker.shutdown(True)
-        self.quitout.close()
+                    # Where to send command return values to
+                    writer = ConnectionWriter(ui_fds[2])
+                    self.command_channel_reply = writer
+
+                    self.haveui = True
+
+                except (EOFError, OSError):
+                    disconnect_client(self, fds)
+
+            if not self.timeout == -1.0 and not self.haveui and self.lastui and self.timeout and \
+                    (self.lastui + self.timeout) < time.time():
+                print("Server timeout, exiting.")
+                self.quit = True
+
+            if self.command_channel in ready:
+                try:
+                    command = self.command_channel.get()
+                except EOFError:
+                    # Client connection shutting down
+                    ready = []
+                    disconnect_client(self, fds)
+                    continue
+                if command[0] == "terminateServer":
+                    self.quit = True
+                    continue
+                try:
+                    print("Running command %s" % command)
+                    self.command_channel_reply.send(self.cooker.command.runCommand(command))
+                except Exception as e:
+                    logger.exception('Exception in server main event loop running command %s (%s)' % (command, str(e)))
+
+            if self.xmlrpc in ready:
+                self.xmlrpc.handle_requests()
+
+            ready = self.idle_commands(.1, fds)
+
+        print("Exiting")
+        # Remove the socket file so we don't get any more connections to avoid races
+        os.unlink(self.sockname)
+        self.sock.close()
+
+        try:
+            self.cooker.shutdown(True)
+        except:
+            pass
+
+        self.cooker.post_serve()
+
+        # Finally release the lockfile but warn about other processes holding it open
+        lock = self.bitbake_lock
+        lockfile = lock.name
+        lock.close()
+        lock = None
+
+        while not lock:
+            with bb.utils.timeout(3):
+                lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True)
+                if not lock:
+                    # Some systems may not have lsof available
+                    procs = None
+                    try:
+                        procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
+                    except OSError as e:
+                        if e.errno != errno.ENOENT:
+                            raise
+                    if procs is None:
+                        # Fall back to fuser if lsof is unavailable
+                        try:
+                            procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
+                        except OSError as e:
+                            if e.errno != errno.ENOENT:
+                                raise
+
+                    msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock"
+                    if procs:
+                        msg += ":\n%s" % str(procs)
+                    print(msg)
+                    return
+        # We hold the lock so we can remove the file (hide stale pid data)
+        bb.utils.remove(lockfile)
+        bb.utils.unlockfile(lock)
 
     def idle_commands(self, delay, fds=None):
         nextsleep = delay
@@ -160,109 +290,333 @@ class ProcessServer(Process, BaseImplServer):
                 del self._idlefuns[function]
                 self.quit = True
 
+        # Create new heartbeat event?
+        now = time.time()
+        if now >= self.next_heartbeat:
+            # We might have missed heartbeats. Just trigger once in
+            # that case and continue after the usual delay.
+            self.next_heartbeat += self.heartbeat_seconds
+            if self.next_heartbeat <= now:
+                self.next_heartbeat = now + self.heartbeat_seconds
+            heartbeat = bb.event.HeartbeatEvent(now)
+            bb.event.fire(heartbeat, self.cooker.data)
+        if nextsleep and now + nextsleep > self.next_heartbeat:
+            # Shorten the timeout so that we wake up in time for
+            # the heartbeat.
+            nextsleep = self.next_heartbeat - now
+
         if nextsleep is not None:
-            select.select(fds,[],[],nextsleep)
+            if self.xmlrpc:
+                nextsleep = self.xmlrpc.get_timeout(nextsleep)
+            try:
+                return select.select(fds,[],[],nextsleep)[0]
+            except InterruptedError:
+                # Ignore EINTR
+                return []
+        else:
+            return select.select(fds,[],[],0)[0]
+
+
+class ServerCommunicator():
+    def __init__(self, connection, recv):
+        self.connection = connection
+        self.recv = recv
 
     def runCommand(self, command):
-        """
-        Run a cooker command on the server
-        """
-        self.command_channel.send(self.cooker.command.runCommand(command))
-
-    def stop(self):
-        self.quitin.send("quit")
-        self.quitin.close()
-
-class BitBakeProcessServerConnection(BitBakeBaseServerConnection):
-    def __init__(self, serverImpl, ui_channel, event_queue):
-        self.procserver = serverImpl
-        self.ui_channel = ui_channel
-        self.event_queue = event_queue
-        self.connection = ServerCommunicator(self.ui_channel, self.procserver.event_handle, self.procserver)
-        self.events = self.event_queue
-        self.terminated = False
-
-    def sigterm_terminate(self):
-        bb.error("UI received SIGTERM")
-        self.terminate()
+        self.connection.send(command)
+        if not self.recv.poll(30):
+            raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server")
+        return self.recv.get()
+
+    def updateFeatureSet(self, featureset):
+        _, error = self.runCommand(["setFeatures", featureset])
+        if error:
+            logger.error("Unable to set the cooker to the correct featureset: %s" % error)
+            raise BaseException(error)
+
+    def getEventHandle(self):
+        handle, error = self.runCommand(["getUIHandlerNum"])
+        if error:
+            logger.error("Unable to get UI Handler Number: %s" % error)
+            raise BaseException(error)
+
+        return handle
+
+    def terminateServer(self):
+        self.connection.send(['terminateServer'])
+        return
+
+class BitBakeProcessServerConnection(object):
+    def __init__(self, ui_channel, recv, eq, sock):
+        self.connection = ServerCommunicator(ui_channel, recv)
+        self.events = eq
+        # Save sock so it doesn't get gc'd for the life of our connection
+        self.socket_connection = sock
 
     def terminate(self):
-        if self.terminated:
-            return
-        self.terminated = True
-        def flushevents():
-            while True:
-                try:
-                    event = self.event_queue.get(block=False)
-                except (Empty, IOError):
-                    break
-                if isinstance(event, logging.LogRecord):
-                    logger.handle(event)
-
-        signal.signal(signal.SIGINT, signal.SIG_IGN)
-        self.procserver.stop()
-
-        while self.procserver.is_alive():
-            flushevents()
-            self.procserver.join(0.1)
-
-        self.ui_channel.close()
-        self.event_queue.close()
-        self.event_queue.setexit()
-
-# Wrap Queue to provide API which isn't server implementation specific
-class ProcessEventQueue(multiprocessing.queues.Queue):
-    def __init__(self, maxsize):
-        multiprocessing.queues.Queue.__init__(self, maxsize, ctx=multiprocessing.get_context())
-        self.exit = False
-        bb.utils.set_process_name("ProcessEQueue")
-
-    def setexit(self):
-        self.exit = True
-
-    def waitEvent(self, timeout):
-        if self.exit:
-            return self.getEvent()
+        self.socket_connection.close()
+        self.connection.connection.close()
+        self.connection.recv.close()
+        return
+
+class BitBakeServer(object):
+    start_log_format = '--- Starting bitbake server pid %s at %s ---'
+    start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f'
+
+    def __init__(self, lock, sockname, configuration, featureset):
+
+        self.configuration = configuration
+        self.featureset = featureset
+        self.sockname = sockname
+        self.bitbake_lock = lock
+        self.readypipe, self.readypipein = os.pipe()
+
+        # Create server control socket
+        if os.path.exists(sockname):
+            os.unlink(sockname)
+
+        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        # AF_UNIX has path length issues so chdir here as a workaround
+        cwd = os.getcwd()
+        logfile = os.path.join(cwd, "bitbake-cookerdaemon.log")
+
         try:
-            if not self.server.is_alive():
-                return self.getEvent()
-            return self.get(True, timeout)
-        except Empty:
-            return None
+            os.chdir(os.path.dirname(sockname))
+            self.sock.bind(os.path.basename(sockname))
+        finally:
+            os.chdir(cwd)
+        self.sock.listen(1)
+
+        os.set_inheritable(self.sock.fileno(), True)
+        startdatetime = datetime.datetime.now()
+        bb.daemonize.createDaemon(self._startServer, logfile)
+        self.sock.close()
+        self.bitbake_lock.close()
+
+        ready = ConnectionReader(self.readypipe)
+        r = ready.poll(30)
+        if r:
+            r = ready.get()
+        if not r or r != "ready":
+            ready.close()
+            bb.error("Unable to start bitbake server")
+            if os.path.exists(logfile):
+                logstart_re = re.compile(self.start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)'))
+                started = False
+                lines = []
+                with open(logfile, "r") as f:
+                    for line in f:
+                        if started:
+                            lines.append(line)
+                        else:
+                            res = logstart_re.match(line.rstrip())
+                            if res:
+                                ldatetime = datetime.datetime.strptime(res.group(2), self.start_log_datetime_format)
+                                if ldatetime >= startdatetime:
+                                    started = True
+                                    lines.append(line)
+                if lines:
+                    if len(lines) > 10:
+                        bb.error("Last 10 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-10:])))
+                    else:
+                        bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines)))
+            raise SystemExit(1)
+        ready.close()
+        os.close(self.readypipein)
+
+    def _startServer(self):
+        print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format)))
+        server = ProcessServer(self.bitbake_lock, self.sock, self.sockname)
+        self.configuration.setServerRegIdleCallback(server.register_idle_function)
+        writer = ConnectionWriter(self.readypipein)
+        try:
+            self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
+            writer.send("ready")
+        except:
+            writer.send("fail")
+            raise
+        finally:
+            os.close(self.readypipein)
+        server.cooker = self.cooker
+        server.server_timeout = self.configuration.server_timeout
+        server.xmlrpcinterface = self.configuration.xmlrpcinterface
+        print("Started bitbake server pid %d" % os.getpid())
+        server.start()
+
+def connectProcessServer(sockname, featureset):
+    # Connect to socket
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    # AF_UNIX has path length issues so chdir here as a workaround
+    cwd = os.getcwd()
+
+    try:
+        os.chdir(os.path.dirname(sockname))
+        sock.connect(os.path.basename(sockname))
+    finally:
+        os.chdir(cwd)
+
+    readfd = writefd = readfd1 = writefd1 = readfd2 = writefd2 = None
+    eq = command_chan_recv = command_chan = None
+
+    try:
+
+        # Send an fd for the remote to write events to
+        readfd, writefd = os.pipe()
+        eq = BBUIEventQueue(readfd)
+        # Send an fd for the remote to receive commands from
+        readfd1, writefd1 = os.pipe()
+        command_chan = ConnectionWriter(writefd1)
+        # Send an fd for the remote to write command results to
+        readfd2, writefd2 = os.pipe()
+        command_chan_recv = ConnectionReader(readfd2)
+
+        sendfds(sock, [writefd, readfd1, writefd2])
+
+        server_connection = BitBakeProcessServerConnection(command_chan, command_chan_recv, eq, sock)
+
+        # Close the ends of the pipes we won't use
+        for i in [writefd, readfd1, writefd2]:
+            os.close(i)
+
+        server_connection.connection.updateFeatureSet(featureset)
+
+    except (Exception, SystemExit) as e:
+        if command_chan_recv:
+            command_chan_recv.close()
+        if command_chan:
+            command_chan.close()
+        for i in [writefd, readfd1, writefd2]:
+            try:
+                os.close(i)
+            except OSError:
+                pass
+        sock.close()
+        raise
+
+    return server_connection
+
+def sendfds(sock, fds):
+        '''Send an array of fds over an AF_UNIX socket.'''
+        fds = array.array('i', fds)
+        msg = bytes([len(fds) % 256])
+        sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
+
+def recvfds(sock, size):
+        '''Receive an array of fds over an AF_UNIX socket.'''
+        a = array.array('i')
+        bytes_size = a.itemsize * size
+        msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size))
+        if not msg and not ancdata:
+            raise EOFError
+        try:
+            if len(ancdata) != 1:
+                raise RuntimeError('received %d items of ancdata' %
+                                   len(ancdata))
+            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
+            if (cmsg_level == socket.SOL_SOCKET and
+                cmsg_type == socket.SCM_RIGHTS):
+                if len(cmsg_data) % a.itemsize != 0:
+                    raise ValueError
+                a.frombytes(cmsg_data)
+                assert len(a) % 256 == msg[0]
+                return list(a)
+        except (ValueError, IndexError):
+            pass
+        raise RuntimeError('Invalid data received')
+
+class BBUIEventQueue:
+    def __init__(self, readfd):
+
+        self.eventQueue = []
+        self.eventQueueLock = threading.Lock()
+        self.eventQueueNotify = threading.Event()
+
+        self.reader = ConnectionReader(readfd)
+
+        self.t = threading.Thread()
+        self.t.setDaemon(True)
+        self.t.run = self.startCallbackHandler
+        self.t.start()
 
     def getEvent(self):
-        try:
-            if not self.server.is_alive():
-                self.setexit()
-            return self.get(False)
-        except Empty:
-            if self.exit:
-                sys.exit(1)
+        self.eventQueueLock.acquire()
+
+        if len(self.eventQueue) == 0:
+            self.eventQueueLock.release()
             return None
 
+        item = self.eventQueue.pop(0)
 
-class BitBakeServer(BitBakeBaseServer):
-    def initServer(self, single_use=True):
-        # establish communication channels.  We use bidirectional pipes for
-        # ui <--> server command/response pairs
-        # and a queue for server -> ui event notifications
-        #
-        self.ui_channel, self.server_channel = Pipe()
-        self.event_queue = ProcessEventQueue(0)
-        self.serverImpl = ProcessServer(self.server_channel, self.event_queue, None)
-        self.event_queue.server = self.serverImpl
+        if len(self.eventQueue) == 0:
+            self.eventQueueNotify.clear()
 
-    def detach(self):
-        self.serverImpl.start()
-        return
+        self.eventQueueLock.release()
+        return item
 
-    def establishConnection(self, featureset):
+    def waitEvent(self, delay):
+        self.eventQueueNotify.wait(delay)
+        return self.getEvent()
 
-        self.connection = BitBakeProcessServerConnection(self.serverImpl, self.ui_channel, self.event_queue)
+    def queue_event(self, event):
+        self.eventQueueLock.acquire()
+        self.eventQueue.append(event)
+        self.eventQueueNotify.set()
+        self.eventQueueLock.release()
 
-        _, error = self.connection.connection.runCommand(["setFeatures", featureset])
-        if error:
-            logger.error("Unable to set the cooker to the correct featureset: %s" % error)
-            raise BaseException(error)
-        signal.signal(signal.SIGTERM, lambda i, s: self.connection.sigterm_terminate())
-        return self.connection
+    def send_event(self, event):
+        self.queue_event(pickle.loads(event))
+
+    def startCallbackHandler(self):
+        bb.utils.set_process_name("UIEventQueue")
+        while True:
+            try:
+                self.reader.wait()
+                event = self.reader.get()
+                self.queue_event(event)
+            except EOFError:
+                # Closing the file descriptor is the easiest way to make this thread exit
+                break
+        self.reader.close()
+
+class ConnectionReader(object):
+
+    def __init__(self, fd):
+        self.reader = multiprocessing.connection.Connection(fd, writable=False)
+        self.rlock = multiprocessing.Lock()
+
+    def wait(self, timeout=None):
+        return multiprocessing.connection.wait([self.reader], timeout)
+
+    def poll(self, timeout=None):
+        return self.reader.poll(timeout)
+
+    def get(self):
+        with self.rlock:
+            res = self.reader.recv_bytes()
+        return multiprocessing.reduction.ForkingPickler.loads(res)
+
+    def fileno(self):
+        return self.reader.fileno()
+
+    def close(self):
+        return self.reader.close()
+
+
+class ConnectionWriter(object):
+
+    def __init__(self, fd):
+        self.writer = multiprocessing.connection.Connection(fd, readable=False)
+        self.wlock = multiprocessing.Lock()
+        # Why bb.event needs this I have no idea
+        self.event = self
+
+    def send(self, obj):
+        obj = multiprocessing.reduction.ForkingPickler.dumps(obj)
+        with self.wlock:
+            self.writer.send_bytes(obj)
+
+    def fileno(self):
+        return self.writer.fileno()
+
+    def close(self):
+        return self.writer.close()

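The rewritten process.py above rests on two primitives: passing raw file descriptors over the AF_UNIX control socket with SCM_RIGHTS ancillary data (sendfds()/recvfds()), and wrapping each descriptor in a multiprocessing Connection that carries ForkingPickler-serialised objects (ConnectionReader/ConnectionWriter). A self-contained sketch of both steps on a POSIX host; the names and the socketpair setup here are illustrative, not part of the commit:

import array
import multiprocessing.connection
import os
import socket
from multiprocessing.reduction import ForkingPickler

# Pass the read end of a pipe across a socketpair as ancillary data,
# as sendfds() does over the server control socket.
parent, child = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
rfd, wfd = os.pipe()
fds = array.array('i', [rfd])
parent.sendmsg([bytes([len(fds) % 256])],
               [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])

# The receiving side gets back kernel-duplicated descriptors, as in recvfds().
msg, ancdata, flags, addr = child.recvmsg(1, socket.CMSG_LEN(fds.itemsize))
received = array.array('i')
received.frombytes(ancdata[0][2])

# Wrap the pipe ends as one-way Connections and move one pickled object,
# mirroring ConnectionWriter.send() and ConnectionReader.get().
reader = multiprocessing.connection.Connection(received[0], writable=False)
writer = multiprocessing.connection.Connection(wfd, readable=False)
writer.send_bytes(ForkingPickler.dumps(["ping"]))
print(ForkingPickler.loads(reader.recv_bytes()))  # ['ping']
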
+ 0 - 422
bitbake/lib/bb/server/xmlrpc.py

@@ -1,422 +0,0 @@
-#
-# BitBake XMLRPC Server
-#
-# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
-# Copyright (C) 2006 - 2008  Richard Purdie
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-"""
-    This module implements an xmlrpc server for BitBake.
-
-    Use this by deriving a class from BitBakeXMLRPCServer and then adding
-    methods which you want to "export" via XMLRPC. If the methods have the
-    prefix xmlrpc_, then registering those function will happen automatically,
-    if not, you need to call register_function.
-
-    Use register_idle_function() to add a function which the xmlrpc server
-    calls from within server_forever when no requests are pending. Make sure
-    that those functions are non-blocking or else you will introduce latency
-    in the server's main loop.
-"""
-
-import os
-import sys
-
-import hashlib
-import time
-import socket
-import signal
-import threading
-import pickle
-import inspect
-import select
-import http.client
-import xmlrpc.client
-from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
-
-import bb
-from bb import daemonize
-from bb.ui import uievent
-from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
-
-DEBUG = False
-
-class BBTransport(xmlrpc.client.Transport):
-    def __init__(self, timeout):
-        self.timeout = timeout
-        self.connection_token = None
-        xmlrpc.client.Transport.__init__(self)
-
-    # Modified from default to pass timeout to HTTPConnection
-    def make_connection(self, host):
-        #return an existing connection if possible.  This allows
-        #HTTP/1.1 keep-alive.
-        if self._connection and host == self._connection[0]:
-            return self._connection[1]
-
-        # create a HTTP connection object from a host descriptor
-        chost, self._extra_headers, x509 = self.get_host_info(host)
-        #store the host argument along with the connection object
-        self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout)
-        return self._connection[1]
-
-    def set_connection_token(self, token):
-        self.connection_token = token
-
-    def send_content(self, h, body):
-        if self.connection_token:
-            h.putheader("Bitbake-token", self.connection_token)
-        xmlrpc.client.Transport.send_content(self, h, body)
-
-def _create_server(host, port, timeout = 60):
-    t = BBTransport(timeout)
-    s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True)
-    return s, t
-
-def check_connection(remote, timeout):
-    try:
-        host, port = remote.split(":")
-        port = int(port)
-    except Exception as e:
-        bb.warn("Failed to read remote definition (%s)" % str(e))
-        raise e
-
-    server, _transport = _create_server(host, port, timeout)
-    try:
-        ret, err =  server.runCommand(['getVariable', 'TOPDIR'])
-        if err or not ret:
-            return False
-    except ConnectionError:
-        return False
-    return True
-
-class BitBakeServerCommands():
-
-    def __init__(self, server):
-        self.server = server
-        self.has_client = False
-
-    def registerEventHandler(self, host, port):
-        """
-        Register a remote UI Event Handler
-        """
-        s, t = _create_server(host, port)
-
-        # we don't allow connections if the cooker is running
-        if (self.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
-            return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.cooker.state)
-
-        self.event_handle = bb.event.register_UIHhandler(s, True)
-        return self.event_handle, 'OK'
-
-    def unregisterEventHandler(self, handlerNum):
-        """
-        Unregister a remote UI Event Handler
-        """
-        return bb.event.unregister_UIHhandler(handlerNum)
-
-    def runCommand(self, command):
-        """
-        Run a cooker command on the server
-        """
-        return self.cooker.command.runCommand(command, self.server.readonly)
-
-    def getEventHandle(self):
-        return self.event_handle
-
-    def terminateServer(self):
-        """
-        Trigger the server to quit
-        """
-        self.server.quit = True
-        print("Server (cooker) exiting")
-        return
-
-    def addClient(self):
-        if self.has_client:
-            return None
-        token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()
-        self.server.set_connection_token(token)
-        self.has_client = True
-        return token
-
-    def removeClient(self):
-        if self.has_client:
-            self.server.set_connection_token(None)
-            self.has_client = False
-            if self.server.single_use:
-                self.server.quit = True
-
-# This request handler checks if the request has a "Bitbake-token" header
-# field (this comes from the client side) and compares it with its internal
-# "Bitbake-token" field (this comes from the server). If the two are not
-# equal, it is assumed that a client is trying to connect to the server
-# while another client is connected to the server. In this case, a 503 error
-# ("service unavailable") is returned to the client.
-class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
-    def __init__(self, request, client_address, server):
-        self.server = server
-        SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
-
-    def do_POST(self):
-        try:
-            remote_token = self.headers["Bitbake-token"]
-        except:
-            remote_token = None
-        if remote_token != self.server.connection_token and remote_token != "observer":
-            self.report_503()
-        else:
-            if remote_token == "observer":
-                self.server.readonly = True
-            else:
-                self.server.readonly = False
-            SimpleXMLRPCRequestHandler.do_POST(self)
-
-    def report_503(self):
-        self.send_response(503)
-        response = 'No more client allowed'
-        self.send_header("Content-type", "text/plain")
-        self.send_header("Content-length", str(len(response)))
-        self.end_headers()
-        self.wfile.write(response)
-
-
-class XMLRPCProxyServer(BaseImplServer):
-    """ not a real working server, but a stub for a proxy server connection
-
-    """
-    def __init__(self, host, port, use_builtin_types=True):
-        self.host = host
-        self.port = port
-
-class XMLRPCServer(SimpleXMLRPCServer, BaseImplServer):
-    # remove this when you're done with debugging
-    # allow_reuse_address = True
-
-    def __init__(self, interface, single_use=False, idle_timeout=0):
-        """
-        Constructor
-        """
-        BaseImplServer.__init__(self)
-        self.single_use = single_use
-        # Use auto port configuration
-        if (interface[1] == -1):
-            interface = (interface[0], 0)
-        SimpleXMLRPCServer.__init__(self, interface,
-                                    requestHandler=BitBakeXMLRPCRequestHandler,
-                                    logRequests=False, allow_none=True)
-        self.host, self.port = self.socket.getsockname()
-        self.connection_token = None
-        #self.register_introspection_functions()
-        self.commands = BitBakeServerCommands(self)
-        self.autoregister_all_functions(self.commands, "")
-        self.interface = interface
-        self.time = time.time()
-        self.idle_timeout = idle_timeout
-        if idle_timeout:
-            self.register_idle_function(self.handle_idle_timeout, self)
-
-    def addcooker(self, cooker):
-        BaseImplServer.addcooker(self, cooker)
-        self.commands.cooker = cooker
-
-    def autoregister_all_functions(self, context, prefix):
-        """
-        Convenience method for registering all functions in the scope
-        of this class that start with a common prefix
-        """
-        methodlist = inspect.getmembers(context, inspect.ismethod)
-        for name, method in methodlist:
-            if name.startswith(prefix):
-                self.register_function(method, name[len(prefix):])
-
-    def handle_idle_timeout(self, server, data, abort):
-        if not abort:
-            if time.time() - server.time > server.idle_timeout:
-                server.quit = True
-                print("Server idle timeout expired")
-        return []
-
-    def serve_forever(self):
-        # Start the actual XMLRPC server
-        bb.cooker.server_main(self.cooker, self._serve_forever)
-
-    def _serve_forever(self):
-        """
-        Serve Requests. Overloaded to honor a quit command
-        """
-        self.quit = False
-        while not self.quit:
-            fds = [self]
-            nextsleep = 0.1
-            for function, data in list(self._idlefuns.items()):
-                retval = None
-                try:
-                    retval = function(self, data, False)
-                    if retval is False:
-                        del self._idlefuns[function]
-                    elif retval is True:
-                        nextsleep = 0
-                    elif isinstance(retval, float):
-                        if (retval < nextsleep):
-                            nextsleep = retval
-                    else:
-                        fds = fds + retval
-                except SystemExit:
-                    raise
-                except:
-                    import traceback
-                    traceback.print_exc()
-                    if retval == None:
-                        # the function execute failed; delete it
-                        del self._idlefuns[function]
-                    pass
-
-            socktimeout = self.socket.gettimeout() or nextsleep
-            socktimeout = min(socktimeout, nextsleep)
-            # Mirror what BaseServer handle_request would do
-            try:
-                fd_sets = select.select(fds, [], [], socktimeout)
-                if fd_sets[0] and self in fd_sets[0]:
-                    if self.idle_timeout:
-                        self.time = time.time()
-                    self._handle_request_noblock()
-            except IOError:
-                # we ignore interrupted calls
-                pass
-
-        # Tell idle functions we're exiting
-        for function, data in list(self._idlefuns.items()):
-            try:
-                retval = function(self, data, True)
-            except:
-                pass
-        self.server_close()
-        return
-
-    def set_connection_token(self, token):
-        self.connection_token = token
-
-class BitBakeXMLRPCServerConnection(BitBakeBaseServerConnection):
-    def __init__(self, serverImpl, clientinfo=("localhost", 0), observer_only = False, featureset = None):
-        self.connection, self.transport = _create_server(serverImpl.host, serverImpl.port)
-        self.clientinfo = clientinfo
-        self.serverImpl = serverImpl
-        self.observer_only = observer_only
-        if featureset:
-            self.featureset = featureset
-        else:
-            self.featureset = []
-
-    def connect(self, token = None):
-        if token is None:
-            if self.observer_only:
-                token = "observer"
-            else:
-                token = self.connection.addClient()
-
-        if token is None:
-            return None
-
-        self.transport.set_connection_token(token)
-        return self
-
-    def setupEventQueue(self):
-        self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
-        for event in bb.event.ui_queue:
-            self.events.queue_event(event)
-
-        _, error = self.connection.runCommand(["setFeatures", self.featureset])
-        if error:
-            # disconnect the client, we can't make the setFeature work
-            self.connection.removeClient()
-            # no need to log it here, the error shall be sent to the client
-            raise BaseException(error)
-
-    def removeClient(self):
-        if not self.observer_only:
-            self.connection.removeClient()
-
-    def terminate(self):
-        # Don't wait for server indefinitely
-        import socket
-        socket.setdefaulttimeout(2)
-        try:
-            self.events.system_quit()
-        except:
-            pass
-        try:
-            self.connection.removeClient()
-        except:
-            pass
-
-class BitBakeServer(BitBakeBaseServer):
-    def initServer(self, interface = ("localhost", 0),
-                   single_use = False, idle_timeout=0):
-        self.interface = interface
-        self.serverImpl = XMLRPCServer(interface, single_use, idle_timeout)
-
-    def detach(self):
-        daemonize.createDaemon(self.serverImpl.serve_forever, "bitbake-cookerdaemon.log")
-        del self.cooker
-
-    def establishConnection(self, featureset):
-        self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, self.interface, False, featureset)
-        return self.connection.connect()
-
-    def set_connection_token(self, token):
-        self.connection.transport.set_connection_token(token)
-
-class BitBakeXMLRPCClient(BitBakeBaseServer):
-
-    def __init__(self, observer_only = False, token = None):
-        self.token = token
-
-        self.observer_only = observer_only
-        # if we need extra caches, just tell the server to load them all
-        pass
-
-    def saveConnectionDetails(self, remote):
-        self.remote = remote
-
-    def establishConnection(self, featureset):
-        # The format of "remote" must be "server:port"
-        try:
-            [host, port] = self.remote.split(":")
-            port = int(port)
-        except Exception as e:
-            bb.warn("Failed to read remote definition (%s)" % str(e))
-            raise e
-
-        # We need our IP for the server connection. We get the IP
-        # by trying to connect with the server
-        try:
-            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-            s.connect((host, port))
-            ip = s.getsockname()[0]
-            s.close()
-        except Exception as e:
-            bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
-            raise e
-        try:
-            self.serverImpl = XMLRPCProxyServer(host, port, use_builtin_types=True)
-            self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, (ip, 0), self.observer_only, featureset)
-            return self.connection.connect(self.token)
-        except Exception as e:
-            bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
-            raise e
-
-    def endSession(self):
-        self.connection.removeClient()

+ 154 - 0
bitbake/lib/bb/server/xmlrpcclient.py

@@ -0,0 +1,154 @@
+#
+# BitBake XMLRPC Client Interface
+#
+# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2008  Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+
+import socket
+import http.client
+import xmlrpc.client
+
+import bb
+from bb.ui import uievent
+
+class BBTransport(xmlrpc.client.Transport):
+    def __init__(self, timeout):
+        self.timeout = timeout
+        self.connection_token = None
+        xmlrpc.client.Transport.__init__(self)
+
+    # Modified from default to pass timeout to HTTPConnection
+    def make_connection(self, host):
+        #return an existing connection if possible.  This allows
+        #HTTP/1.1 keep-alive.
+        if self._connection and host == self._connection[0]:
+            return self._connection[1]
+
+        # create a HTTP connection object from a host descriptor
+        chost, self._extra_headers, x509 = self.get_host_info(host)
+        #store the host argument along with the connection object
+        self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout)
+        return self._connection[1]
+
+    def set_connection_token(self, token):
+        self.connection_token = token
+
+    def send_content(self, h, body):
+        if self.connection_token:
+            h.putheader("Bitbake-token", self.connection_token)
+        xmlrpc.client.Transport.send_content(self, h, body)
+
+def _create_server(host, port, timeout = 60):
+    t = BBTransport(timeout)
+    s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True)
+    return s, t
+
+def check_connection(remote, timeout):
+    try:
+        host, port = remote.split(":")
+        port = int(port)
+    except Exception as e:
+        bb.warn("Failed to read remote definition (%s)" % str(e))
+        raise e
+
+    server, _transport = _create_server(host, port, timeout)
+    try:
+        ret, err =  server.runCommand(['getVariable', 'TOPDIR'])
+        if err or not ret:
+            return False
+    except ConnectionError:
+        return False
+    return True
+
+class BitBakeXMLRPCServerConnection(object):
+    def __init__(self, host, port, clientinfo=("localhost", 0), observer_only = False, featureset = None):
+        self.connection, self.transport = _create_server(host, port)
+        self.clientinfo = clientinfo
+        self.observer_only = observer_only
+        if featureset:
+            self.featureset = featureset
+        else:
+            self.featureset = []
+
+        self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
+
+        _, error = self.connection.runCommand(["setFeatures", self.featureset])
+        if error:
+            # disconnect the client, we can't make the setFeature work
+            self.connection.removeClient()
+            # no need to log it here, the error shall be sent to the client
+            raise BaseException(error)
+
+    def connect(self, token = None):
+        if token is None:
+            if self.observer_only:
+                token = "observer"
+            else:
+                token = self.connection.addClient()
+
+        if token is None:
+            return None
+
+        self.transport.set_connection_token(token)
+        return self
+
+    def removeClient(self):
+        if not self.observer_only:
+            self.connection.removeClient()
+
+    def terminate(self):
+        # Don't wait for server indefinitely
+        socket.setdefaulttimeout(2)
+        try:
+            self.events.system_quit()
+        except:
+            pass
+        try:
+            self.connection.removeClient()
+        except:
+            pass
+
+def connectXMLRPC(remote, featureset, observer_only = False, token = None):
+    # The format of "remote" must be "server:port"
+    try:
+        [host, port] = remote.split(":")
+        port = int(port)
+    except Exception as e:
+        bb.warn("Failed to parse remote definition %s (%s)" % (remote, str(e)))
+        raise e
+
+    # We need our IP for the server connection. We get the IP
+    # by trying to connect with the server
+    try:
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        s.connect((host, port))
+        ip = s.getsockname()[0]
+        s.close()
+    except Exception as e:
+        bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
+        raise e
+    try:
+        connection = BitBakeXMLRPCServerConnection(host, port, (ip, 0), observer_only, featureset)
+        return connection.connect(token)
+    except Exception as e:
+        bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
+        raise e
+
+
+

+ 158 - 0
bitbake/lib/bb/server/xmlrpcserver.py

@@ -0,0 +1,158 @@
+#
+# BitBake XMLRPC Server Interface
+#
+# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
+# Copyright (C) 2006 - 2008  Richard Purdie
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+
+import hashlib
+import time
+import inspect
+from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
+
+import bb
+
+# This request handler checks whether the request carries a "Bitbake-token"
+# header (set by the client) and compares it with the server's own
+# connection token. If the two differ, it is assumed that a second client
+# is trying to connect while another client is already connected. In that
+# case a 503 ("service unavailable") error is returned to the client.
+class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+    def __init__(self, request, client_address, server):
+        self.server = server
+        SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
+
+    def do_POST(self):
+        try:
+            remote_token = self.headers["Bitbake-token"]
+        except:
+            remote_token = None
+        if 0 and remote_token != self.server.connection_token and remote_token != "observer":
+            self.report_503()
+        else:
+            if remote_token == "observer":
+                self.server.readonly = True
+            else:
+                self.server.readonly = False
+            SimpleXMLRPCRequestHandler.do_POST(self)
+
+    def report_503(self):
+        self.send_response(503)
+        response = 'No more clients allowed'
+        self.send_header("Content-type", "text/plain")
+        self.send_header("Content-length", str(len(response)))
+        self.end_headers()
+        self.wfile.write(bytes(response, 'utf-8'))
+
+class BitBakeXMLRPCServer(SimpleXMLRPCServer):
+    # remove this when you're done with debugging
+    # allow_reuse_address = True
+
+    def __init__(self, interface, cooker, parent):
+        # Use auto port configuration
+        if (interface[1] == -1):
+            interface = (interface[0], 0)
+        SimpleXMLRPCServer.__init__(self, interface,
+                                    requestHandler=BitBakeXMLRPCRequestHandler,
+                                    logRequests=False, allow_none=True)
+        self.host, self.port = self.socket.getsockname()
+        self.interface = interface
+
+        self.connection_token = None
+        self.commands = BitBakeXMLRPCServerCommands(self)
+        self.register_functions(self.commands, "")
+
+        self.cooker = cooker
+        self.parent = parent
+
+
+    def register_functions(self, context, prefix):
+        """
+        Convenience method for registering all functions in the scope
+        of this class that start with a common prefix
+        """
+        methodlist = inspect.getmembers(context, inspect.ismethod)
+        for name, method in methodlist:
+            if name.startswith(prefix):
+                self.register_function(method, name[len(prefix):])
+
+    def get_timeout(self, delay):
+        socktimeout = self.socket.gettimeout() or delay
+        return min(socktimeout, delay)
+
+    def handle_requests(self):
+        self._handle_request_noblock()
+
+class BitBakeXMLRPCServerCommands():
+
+    def __init__(self, server):
+        self.server = server
+        self.has_client = False
+
+    def registerEventHandler(self, host, port):
+        """
+        Register a remote UI Event Handler
+        """
+        s, t = bb.server.xmlrpcclient._create_server(host, port)
+
+        # we don't allow connections if the cooker is running
+        if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
+            return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state)
+
+        self.event_handle = bb.event.register_UIHhandler(s, True)
+        return self.event_handle, 'OK'
+
+    def unregisterEventHandler(self, handlerNum):
+        """
+        Unregister a remote UI Event Handler
+        """
+        ret = bb.event.unregister_UIHhandler(handlerNum, True)
+        self.event_handle = None
+        return ret
+
+    def runCommand(self, command):
+        """
+        Run a cooker command on the server
+        """
+        return self.server.cooker.command.runCommand(command, self.server.readonly)
+
+    def getEventHandle(self):
+        return self.event_handle
+
+    def terminateServer(self):
+        """
+        Trigger the server to quit
+        """
+        self.server.parent.quit = True
+        print("XMLRPC Server triggering exit")
+        return
+
+    def addClient(self):
+        if self.server.parent.haveui:
+            return None
+        token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()
+        self.server.connection_token = token
+        self.server.parent.haveui = True
+        return token
+
+    def removeClient(self):
+        if self.server.parent.haveui:
+            self.server.connection_token = None
+            self.server.parent.haveui = False
+
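register_functions() above uses introspection to expose every method of the commands object whose name begins with a given prefix (BitBake passes an empty prefix, so all methods are exposed). A standalone sketch of the idiom, with a made-up Commands class and prefix:

    import inspect
    from xmlrpc.server import SimpleXMLRPCServer

    class Commands:
        def cmd_ping(self):
            return "pong"

        def cmd_echo(self, s):
            return s

    # Port 0 asks the OS for a free port, like the auto-port path above
    server = SimpleXMLRPCServer(("localhost", 0), logRequests=False, allow_none=True)
    for name, method in inspect.getmembers(Commands(), inspect.ismethod):
        if name.startswith("cmd_"):
            # cmd_ping is exposed as "ping", cmd_echo as "echo"
            server.register_function(method, name[len("cmd_"):])
    # server.serve_forever() would start answering requests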

+ 173 - 55
bitbake/lib/bb/siggen.py

@@ -5,6 +5,8 @@ import re
 import tempfile
 import pickle
 import bb.data
+import difflib
+import simplediff
 from bb.checksum import FileChecksumCache
 
 logger = logging.getLogger('BitBake.SigGen')
@@ -13,7 +15,7 @@ def init(d):
     siggens = [obj for obj in globals().values()
                       if type(obj) is type and issubclass(obj, SignatureGenerator)]
 
-    desired = d.getVar("BB_SIGNATURE_HANDLER", True) or "noop"
+    desired = d.getVar("BB_SIGNATURE_HANDLER") or "noop"
     for sg in siggens:
         if desired == sg.name:
             return sg(d)
@@ -30,6 +32,7 @@ class SignatureGenerator(object):
     name = "noop"
 
     def __init__(self, data):
+        self.basehash = {}
         self.taskhash = {}
         self.runtaskdeps = {}
         self.file_checksum_values = {}
@@ -61,10 +64,13 @@ class SignatureGenerator(object):
         return
 
     def get_taskdata(self):
-       return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints)
+        return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash)
 
     def set_taskdata(self, data):
-        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints = data
+        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data
+
+    def reset(self, data):
+        self.__init__(data)
 
 
 class SignatureGeneratorBasic(SignatureGenerator):
@@ -82,10 +88,10 @@ class SignatureGeneratorBasic(SignatureGenerator):
         self.gendeps = {}
         self.lookupcache = {}
         self.pkgnameextract = re.compile("(?P<fn>.*)\..*")
-        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST", True) or "").split())
+        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST") or "").split())
         self.taskwhitelist = None
         self.init_rundepcheck(data)
-        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE", True)
+        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE")
         if checksum_cache_file:
             self.checksum_cache = FileChecksumCache()
             self.checksum_cache.init_cache(data, checksum_cache_file)
@@ -93,7 +99,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
             self.checksum_cache = None
 
     def init_rundepcheck(self, data):
-        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None
+        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST") or None
         if self.taskwhitelist:
             self.twl = re.compile(self.taskwhitelist)
         else:
@@ -101,6 +107,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
 
     def _build_data(self, fn, d):
 
+        ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
         tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
 
         taskdeps = {}
@@ -133,7 +140,11 @@ class SignatureGeneratorBasic(SignatureGenerator):
                 var = lookupcache[dep]
                 if var is not None:
                     data = data + str(var)
-            self.basehash[fn + "." + task] = hashlib.md5(data.encode("utf-8")).hexdigest()
+            datahash = hashlib.md5(data.encode("utf-8")).hexdigest()
+            k = fn + "." + task
+            if not ignore_mismatch and k in self.basehash and self.basehash[k] != datahash:
+                bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
+            self.basehash[k] = datahash
             taskdeps[task] = alldeps
 
         self.taskdeps[fn] = taskdeps
@@ -150,13 +161,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
 
         try:
             taskdeps = self._build_data(fn, d)
+        except bb.parse.SkipRecipe:
+            raise
         except:
             bb.warn("Error during finalise of %s" % fn)
             raise
 
         #Slow but can be useful for debugging mismatched basehashes
         #for task in self.taskdeps[fn]:
-        #    self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)
+        #    self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
 
         for task in taskdeps:
             d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
@@ -182,6 +195,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
     def get_taskhash(self, fn, task, deps, dataCache):
         k = fn + "." + task
         data = dataCache.basetaskhash[k]
+        self.basehash[k] = data
         self.runtaskdeps[k] = []
         self.file_checksum_values[k] = []
         recipename = dataCache.pkg_fn[fn]
@@ -278,6 +292,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
             if 'nostamp:' in self.taints[k]:
                 data['taint'] = self.taints[k]
 
+        computed_basehash = calc_basehash(data)
+        if computed_basehash != self.basehash[k]:
+            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
+        if runtime and k in self.taskhash:
+            computed_taskhash = calc_taskhash(data)
+            if computed_taskhash != self.taskhash[k]:
+                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
+                sigfile = sigfile.replace(self.taskhash[k], computed_taskhash)
+
         fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
         try:
             with os.fdopen(fd, "wb") as stream:
@@ -292,17 +315,8 @@ class SignatureGeneratorBasic(SignatureGenerator):
                 pass
             raise err
 
-        computed_basehash = calc_basehash(data)
-        if computed_basehash != self.basehash[k]:
-            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
-        if runtime and k in self.taskhash:
-            computed_taskhash = calc_taskhash(data)
-            if computed_taskhash != self.taskhash[k]:
-                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
-
-
-    def dump_sigs(self, dataCaches, options):
-        for fn in self.taskdeps:
+    def dump_sigfn(self, fn, dataCaches, options):
+        if fn in self.taskdeps:
             for task in self.taskdeps[fn]:
                 tid = fn + ":" + task
                 (mc, _, _) = bb.runqueue.split_tid(tid)
@@ -340,15 +354,71 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
 
 def dump_this_task(outfile, d):
     import bb.parse
-    fn = d.getVar("BB_FILENAME", True)
-    task = "do_" + d.getVar("BB_CURRENTTASK", True)
+    fn = d.getVar("BB_FILENAME")
+    task = "do_" + d.getVar("BB_CURRENTTASK")
     referencestamp = bb.build.stamp_internal(task, d, None, True)
     bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
 
+def init_colors(enable_color):
+    """Initialise colour dict for passing to compare_sigfiles()"""
+    # First set up the colours
+    colors = {'color_title':   '\033[1;37;40m',
+              'color_default': '\033[0;37;40m',
+              'color_add':     '\033[1;32;40m',
+              'color_remove':  '\033[1;31;40m',
+             }
+    # Leave all keys present but clear the values
+    if not enable_color:
+        for k in colors.keys():
+            colors[k] = ''
+    return colors
+
+def worddiff_str(oldstr, newstr, colors=None):
+    if not colors:
+        colors = init_colors(False)
+    diff = simplediff.diff(oldstr.split(' '), newstr.split(' '))
+    ret = []
+    for change, value in diff:
+        value = ' '.join(value)
+        if change == '=':
+            ret.append(value)
+        elif change == '+':
+            item = '{color_add}{{+{value}+}}{color_default}'.format(value=value, **colors)
+            ret.append(item)
+        elif change == '-':
+            item = '{color_remove}[-{value}-]{color_default}'.format(value=value, **colors)
+            ret.append(item)
+    whitespace_note = ''
+    if oldstr != newstr and ' '.join(oldstr.split()) == ' '.join(newstr.split()):
+        whitespace_note = ' (whitespace changed)'
+    return '"%s"%s' % (' '.join(ret), whitespace_note)
+
+def list_inline_diff(oldlist, newlist, colors=None):
+    if not colors:
+        colors = init_colors(False)
+    diff = simplediff.diff(oldlist, newlist)
+    ret = []
+    for change, value in diff:
+        value = ' '.join(value)
+        if change == '=':
+            ret.append("'%s'" % value)
+        elif change == '+':
+            item = '{color_add}+{value}{color_default}'.format(value=value, **colors)
+            ret.append(item)
+        elif change == '-':
+            item = '{color_remove}-{value}{color_default}'.format(value=value, **colors)
+            ret.append(item)
+    return '[%s]' % (', '.join(ret))
+
 def clean_basepath(a):
-    b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2]
+    mc = None
+    if a.startswith("multiconfig:"):
+        _, mc, a = a.split(":", 2)
+    b = a.rsplit("/", 2)[1] + '/' + a.rsplit("/", 2)[2]
     if a.startswith("virtual:"):
         b = b + ":" + a.rsplit(":", 1)[0]
+    if mc:
+        b = b + ":multiconfig:" + mc
     return b
 
 def clean_basepaths(a):
@@ -363,9 +433,26 @@ def clean_basepaths_list(a):
         b.append(clean_basepath(x))
     return b
 
-def compare_sigfiles(a, b, recursecb = None):
+def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
     output = []
 
+    colors = init_colors(color)
+    def color_format(formatstr, **values):
+        """
+        Return colour formatted string.
+        NOTE: call with the format string, not an already formatted string
+        containing values (otherwise you could have trouble with { and }
+        characters)
+        """
+        if not formatstr.endswith('{color_default}'):
+            formatstr += '{color_default}'
+        # In newer python 3 versions you can pass both of these directly,
+        # but we only require 3.4 at the moment
+        formatparams = {}
+        formatparams.update(colors)
+        formatparams.update(values)
+        return formatstr.format(**formatparams)
+
     with open(a, 'rb') as f:
         p1 = pickle.Unpickler(f)
         a_data = p1.load()
@@ -419,39 +506,59 @@ def compare_sigfiles(a, b, recursecb = None):
         return changed, added, removed
 
     if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
-        output.append("basewhitelist changed from '%s' to '%s'" % (a_data['basewhitelist'], b_data['basewhitelist']))
+        output.append(color_format("{color_title}basewhitelist changed{color_default} from '%s' to '%s'") % (a_data['basewhitelist'], b_data['basewhitelist']))
         if a_data['basewhitelist'] and b_data['basewhitelist']:
             output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))
 
     if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
-        output.append("taskwhitelist changed from '%s' to '%s'" % (a_data['taskwhitelist'], b_data['taskwhitelist']))
+        output.append(color_format("{color_title}taskwhitelist changed{color_default} from '%s' to '%s'") % (a_data['taskwhitelist'], b_data['taskwhitelist']))
         if a_data['taskwhitelist'] and b_data['taskwhitelist']:
             output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))
 
     if a_data['taskdeps'] != b_data['taskdeps']:
-        output.append("Task dependencies changed from:\n%s\nto:\n%s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
+        output.append(color_format("{color_title}Task dependencies changed{color_default} from:\n%s\nto:\n%s") % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
 
-    if a_data['basehash'] != b_data['basehash']:
-        output.append("basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash']))
+    if a_data['basehash'] != b_data['basehash'] and not collapsed:
+        output.append(color_format("{color_title}basehash changed{color_default} from %s to %s") % (a_data['basehash'], b_data['basehash']))
 
     changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
     if changed:
         for dep in changed:
-            output.append("List of dependencies for variable %s changed from '%s' to '%s'" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
+            output.append(color_format("{color_title}List of dependencies for variable %s changed from '{color_default}%s{color_title}' to '{color_default}%s{color_title}'") % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
             if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
                 output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
     if added:
         for dep in added:
-            output.append("Dependency on variable %s was added" % (dep))
+            output.append(color_format("{color_title}Dependency on variable %s was added") % (dep))
     if removed:
         for dep in removed:
-            output.append("Dependency on Variable %s was removed" % (dep))
+            output.append(color_format("{color_title}Dependency on Variable %s was removed") % (dep))
 
 
     changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
     if changed:
         for dep in changed:
-            output.append("Variable %s value changed from '%s' to '%s'" % (dep, a_data['varvals'][dep], b_data['varvals'][dep]))
+            oldval = a_data['varvals'][dep]
+            newval = b_data['varvals'][dep]
+            if newval and oldval and ('\n' in oldval or '\n' in newval):
+                diff = difflib.unified_diff(oldval.splitlines(), newval.splitlines(), lineterm='')
+                # Cut off the first two lines, since we aren't interested in
+                # the old/new filename (they are blank anyway in this case)
+                difflines = list(diff)[2:]
+                if color:
+                    # Add colour to diff output
+                    for i, line in enumerate(difflines):
+                        if line.startswith('+'):
+                            line = color_format('{color_add}{line}', line=line)
+                            difflines[i] = line
+                        elif line.startswith('-'):
+                            line = color_format('{color_remove}{line}', line=line)
+                            difflines[i] = line
+                output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff='\n'.join(difflines)))
+            elif newval and oldval and (' ' in oldval or ' ' in newval):
+                output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff=worddiff_str(oldval, newval, colors)))
+            else:
+                output.append(color_format("{color_title}Variable {var} value changed from '{color_default}{oldval}{color_title}' to '{color_default}{newval}{color_title}'{color_default}", var=dep, oldval=oldval, newval=newval))
 
     if not 'file_checksum_values' in a_data:
          a_data['file_checksum_values'] = {}
@@ -461,32 +568,38 @@ def compare_sigfiles(a, b, recursecb = None):
     changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
     if changed:
         for f, old, new in changed:
-            output.append("Checksum for file %s changed from %s to %s" % (f, old, new))
+            output.append(color_format("{color_title}Checksum for file %s changed{color_default} from %s to %s") % (f, old, new))
     if added:
         for f in added:
-            output.append("Dependency on checksum of file %s was added" % (f))
+            output.append(color_format("{color_title}Dependency on checksum of file %s was added") % (f))
     if removed:
         for f in removed:
-            output.append("Dependency on checksum of file %s was removed" % (f))
+            output.append(color_format("{color_title}Dependency on checksum of file %s was removed") % (f))
 
     if not 'runtaskdeps' in a_data:
          a_data['runtaskdeps'] = {}
     if not 'runtaskdeps' in b_data:
          b_data['runtaskdeps'] = {}
 
-    if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
-        changed = ["Number of task dependencies changed"]
-    else:
-        changed = []
-        for idx, task in enumerate(a_data['runtaskdeps']):
-            a = a_data['runtaskdeps'][idx]
-            b = b_data['runtaskdeps'][idx]
-            if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b]:
-                changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))
+    if not collapsed:
+        if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
+            changed = ["Number of task dependencies changed"]
+        else:
+            changed = []
+            for idx, task in enumerate(a_data['runtaskdeps']):
+                a = a_data['runtaskdeps'][idx]
+                b = b_data['runtaskdeps'][idx]
+                if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
+                    changed.append("%s with hash %s\n changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
 
-    if changed:
-        output.append("runtaskdeps changed from %s to %s" % (clean_basepaths_list(a_data['runtaskdeps']), clean_basepaths_list(b_data['runtaskdeps'])))
-        output.append("\n".join(changed))
+        if changed:
+            clean_a = clean_basepaths_list(a_data['runtaskdeps'])
+            clean_b = clean_basepaths_list(b_data['runtaskdeps'])
+            if clean_a != clean_b:
+                output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
+            else:
+                output.append(color_format("{color_title}runtaskdeps changed:"))
+            output.append("\n".join(changed))
 
 
     if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
@@ -502,7 +615,7 @@ def compare_sigfiles(a, b, recursecb = None):
                             #output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
                             bdep_found = True
                 if not bdep_found:
-                    output.append("Dependency on task %s was added with hash %s" % (clean_basepath(dep), b[dep]))
+                    output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
         if removed:
             for dep in removed:
                 adep_found = False
@@ -512,21 +625,25 @@ def compare_sigfiles(a, b, recursecb = None):
                             #output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
                             adep_found = True
                 if not adep_found:
-                    output.append("Dependency on task %s was removed with hash %s" % (clean_basepath(dep), a[dep]))
+                    output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
         if changed:
             for dep in changed:
-                output.append("Hash for dependent task %s changed from %s to %s" % (clean_basepath(dep), a[dep], b[dep]))
+                if not collapsed:
+                    output.append(color_format("{color_title}Hash for dependent task %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
                 if callable(recursecb):
-                    # If a dependent hash changed, we might as well print the line above and then defer to
-                    # the changes in that hash since, in all likelihood, they're the same changes this task also saw.
                     recout = recursecb(dep, a[dep], b[dep])
                     if recout:
-                        output = [output[-1]] + recout
+                        if collapsed:
+                            output.extend(recout)
+                        else:
+                            # If a dependent hash changed, might as well print the line above and then defer to the changes in 
+                            # that hash since in all likelyhood, they're the same changes this task also saw.
+                            output = [output[-1]] + recout
 
     a_taint = a_data.get('taint', None)
     b_taint = b_data.get('taint', None)
     if a_taint != b_taint:
-        output.append("Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint))
+        output.append(color_format("{color_title}Taint (by forced/invalidated task) changed{color_default} from %s to %s") % (a_taint, b_taint))
 
     return output
 
@@ -554,7 +671,8 @@ def calc_taskhash(sigdata):
         data = data + sigdata['runtaskhashes'][dep]
 
     for c in sigdata['file_checksum_values']:
-        data = data + c[1]
+        if c[1]:
+            data = data + c[1]
 
     if 'taint' in sigdata:
         if 'nostamp:' in sigdata['taint']:
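The new multi-line value handling in compare_sigfiles() is plain difflib usage: build a unified diff of the old and new variable values and drop the two filename header lines (blank in this case). A small sketch with made-up values:

    import difflib

    oldval = "CFLAGS = -O2\nLDFLAGS ="
    newval = "CFLAGS = -O2 -g\nLDFLAGS ="
    diff = difflib.unified_diff(oldval.splitlines(), newval.splitlines(), lineterm='')
    # The first two lines are the '---'/'+++' filename headers; cut them
    # off just as the code above does.
    for line in list(diff)[2:]:
        print(line)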

+ 16 - 20
bitbake/lib/bb/taskdata.py

@@ -47,7 +47,7 @@ class TaskData:
     """
     BitBake Task Data implementation
     """
-    def __init__(self, abort = True, tryaltconfigs = False, skiplist = None, allowincomplete = False):
+    def __init__(self, abort = True, skiplist = None, allowincomplete = False):
         self.build_targets = {}
         self.run_targets = {}
 
@@ -66,7 +66,6 @@ class TaskData:
         self.failed_fns = []
 
         self.abort = abort
-        self.tryaltconfigs = tryaltconfigs
         self.allowincomplete = allowincomplete
 
         self.skiplist = skiplist
@@ -89,6 +88,19 @@ class TaskData:
 
         self.add_extra_deps(fn, dataCache)
 
+        # Common code for the (dep_name, depends) pairs ('depends', idepends) and ('rdepends', irdepends)
+        def handle_deps(task, dep_name, depends, seen):
+            if dep_name in task_deps and task in task_deps[dep_name]:
+                ids = []
+                for dep in task_deps[dep_name][task].split():
+                    if dep:
+                        parts = dep.split(":")
+                        if len(parts) != 2:
+                            bb.msg.fatal("TaskData", "Error for %s:%s[%s], dependency %s in '%s' does not contain exactly one ':' character.\n Task '%s' should be specified in the form 'packagename:task'" % (fn, task, dep_name, dep, task_deps[dep_name][task], dep_name))
+                        ids.append((parts[0], parts[1]))
+                        seen(parts[0])
+                depends.extend(ids)
+
         for task in task_deps['tasks']:
 
             tid = "%s:%s" % (fn, task)
@@ -105,24 +117,8 @@ class TaskData:
             self.taskentries[tid].tdepends.extend(parentids)
 
             # Touch all intertask dependencies
-            if 'depends' in task_deps and task in task_deps['depends']:
-                ids = []
-                for dep in task_deps['depends'][task].split():
-                    if dep:
-                        if ":" not in dep:
-                            bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'depends' should be specified in the form 'packagename:task'" % (fn, dep))
-                        ids.append(((dep.split(":")[0]), dep.split(":")[1]))
-                        self.seen_build_target(dep.split(":")[0])
-                self.taskentries[tid].idepends.extend(ids)
-            if 'rdepends' in task_deps and task in task_deps['rdepends']:
-                ids = []
-                for dep in task_deps['rdepends'][task].split():
-                    if dep:
-                        if ":" not in dep:
-                            bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'rdepends' should be specified in the form 'packagename:task'" % (fn, dep))
-                        ids.append(((dep.split(":")[0]), dep.split(":")[1]))
-                        self.seen_run_target(dep.split(":")[0])
-                self.taskentries[tid].irdepends.extend(ids)
+            handle_deps(task, 'depends', self.taskentries[tid].idepends, self.seen_build_target)
+            handle_deps(task, 'rdepends', self.taskentries[tid].irdepends, self.seen_run_target)
 
         # Work out build dependencies
         if not fn in self.depids:
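handle_deps() above replaces two near-identical loops with one helper that splits each 'packagename:task' pair and rejects malformed entries. The parsing step in isolation (the input string is illustrative):

    def parse_intertask_deps(depstr):
        """Split a space-separated list of 'packagename:task' pairs."""
        ids = []
        for dep in depstr.split():
            parts = dep.split(":")
            if len(parts) != 2:
                raise ValueError("dependency %r must be 'packagename:task'" % dep)
            ids.append((parts[0], parts[1]))
        return ids

    print(parse_intertask_deps("virtual/kernel:do_deploy util-linux:do_populate_sysroot"))
    # [('virtual/kernel', 'do_deploy'), ('util-linux', 'do_populate_sysroot')]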

+ 57 - 9
bitbake/lib/bb/tests/codeparser.py

@@ -49,6 +49,9 @@ class ReferenceTest(unittest.TestCase):
     def assertExecs(self, execs):
         self.assertEqual(self.execs, execs)
 
+    def assertContains(self, contains):
+        self.assertEqual(self.contains, contains)
+
 class VariableReferenceTest(ReferenceTest):
 
     def parseExpression(self, exp):
@@ -68,7 +71,7 @@ class VariableReferenceTest(ReferenceTest):
 
     def test_python_reference(self):
         self.setEmptyVars(["BAR"])
-        self.parseExpression("${@bb.data.getVar('BAR', d, True) + 'foo'}")
+        self.parseExpression("${@d.getVar('BAR') + 'foo'}")
         self.assertReferences(set(["BAR"]))
 
 class ShellReferenceTest(ReferenceTest):
@@ -201,6 +204,7 @@ class PythonReferenceTest(ReferenceTest):
 
         self.references = parsedvar.references | parser.references
         self.execs = parser.execs
+        self.contains = parser.contains
 
     @staticmethod
     def indent(value):
@@ -209,17 +213,17 @@ be. These unit tests are testing snippets."""
         return " " + value
 
     def test_getvar_reference(self):
-        self.parseExpression("bb.data.getVar('foo', d, True)")
+        self.parseExpression("d.getVar('foo')")
         self.assertReferences(set(["foo"]))
         self.assertExecs(set())
 
     def test_getvar_computed_reference(self):
-        self.parseExpression("bb.data.getVar('f' + 'o' + 'o', d, True)")
+        self.parseExpression("d.getVar('f' + 'o' + 'o')")
         self.assertReferences(set())
         self.assertExecs(set())
 
     def test_getvar_exec_reference(self):
-        self.parseExpression("eval('bb.data.getVar(\"foo\", d, True)')")
+        self.parseExpression("eval('d.getVar(\"foo\")')")
         self.assertReferences(set())
         self.assertExecs(set(["eval"]))
 
@@ -265,15 +269,35 @@ be. These unit tests are testing snippets."""
         self.assertExecs(set(["testget"]))
         del self.context["testget"]
 
+    def test_contains(self):
+        self.parseExpression('bb.utils.contains("TESTVAR", "one", "true", "false", d)')
+        self.assertContains({'TESTVAR': {'one'}})
+
+    def test_contains_multi(self):
+        self.parseExpression('bb.utils.contains("TESTVAR", "one two", "true", "false", d)')
+        self.assertContains({'TESTVAR': {'one two'}})
+
+    def test_contains_any(self):
+        self.parseExpression('bb.utils.contains_any("TESTVAR", "hello", "true", "false", d)')
+        self.assertContains({'TESTVAR': {'hello'}})
+
+    def test_contains_any_multi(self):
+        self.parseExpression('bb.utils.contains_any("TESTVAR", "one two three", "true", "false", d)')
+        self.assertContains({'TESTVAR': {'one', 'two', 'three'}})
+
+    def test_contains_filter(self):
+        self.parseExpression('bb.utils.filter("TESTVAR", "hello there world", d)')
+        self.assertContains({'TESTVAR': {'hello', 'there', 'world'}})
+
 
 class DependencyReferenceTest(ReferenceTest):
 
     pydata = """
-bb.data.getVar('somevar', d, True)
+d.getVar('somevar')
 def test(d):
     foo = 'bar %s' % 'foo'
 def test2(d):
-    d.getVar(foo, True)
+    d.getVar(foo)
     d.getVar('bar', False)
     test2(d)
 
@@ -285,9 +309,9 @@ def a():
 
 test(d)
 
-bb.data.expand(bb.data.getVar("something", False, d), d)
-bb.data.expand("${inexpand} somethingelse", d)
-bb.data.getVar(a(), d, False)
+d.expand(d.getVar("something", False))
+d.expand("${inexpand} somethingelse")
+d.getVar(a(), False)
 """
 
     def test_python(self):
@@ -370,6 +394,30 @@ esac
 
         self.assertEqual(deps, set(["oe_libinstall"]))
 
+    def test_contains_vardeps(self):
+        expr = '${@bb.utils.filter("TESTVAR", "somevalue anothervalue", d)} \
+                ${@bb.utils.contains("TESTVAR", "testval testval2", "yetanothervalue", "", d)} \
+                ${@bb.utils.contains("TESTVAR", "testval2 testval3", "blah", "", d)} \
+                ${@bb.utils.contains_any("TESTVAR", "testval2 testval3", "lastone", "", d)}'
+        parsedvar = self.d.expandWithRefs(expr, None)
+        # Check contains
+        self.assertEqual(parsedvar.contains, {'TESTVAR': {'testval2 testval3', 'anothervalue', 'somevalue', 'testval testval2', 'testval2', 'testval3'}})
+        # Check dependencies
+        self.d.setVar('ANOTHERVAR', expr)
+        self.d.setVar('TESTVAR', 'anothervalue testval testval2')
+        deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), self.d)
+        self.assertEqual(sorted(values.splitlines()),
+                         sorted([expr,
+                          'TESTVAR{anothervalue} = Set',
+                          'TESTVAR{somevalue} = Unset',
+                          'TESTVAR{testval testval2} = Set',
+                          'TESTVAR{testval2 testval3} = Unset',
+                          'TESTVAR{testval2} = Set',
+                          'TESTVAR{testval3} = Unset'
+                          ]))
+        # Check final value
+        self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['anothervalue', 'yetanothervalue', 'lastone'])
+
     #Currently no wildcard support
     #def test_vardeps_wildcards(self):
     #    self.d.setVar("oe_libinstall", "echo test")
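The new contains-tracking tests above record which (variable, value-set) checks an expression performs so they can feed into task hashes. For reference, the runtime semantics being tracked, approximated in plain Python (the real bb.utils helpers take a datastore plus true/false return values, omitted here):

    def contains(value, checkvalues):
        # True only if every listed word appears in the variable's value
        return set(checkvalues.split()).issubset(value.split())

    def contains_any(value, checkvalues):
        # True if at least one listed word appears in the variable's value
        return bool(set(checkvalues.split()) & set(value.split()))

    assert contains("one two three", "one two")
    assert not contains("one two three", "one four")
    assert contains_any("one two three", "four three")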

+ 207 - 46
bitbake/lib/bb/tests/data.py

@@ -77,13 +77,13 @@ class DataExpansions(unittest.TestCase):
         self.assertEqual(str(val), "boo value_of_foo")
 
     def test_python_snippet_getvar(self):
-        val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
+        val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
         self.assertEqual(str(val), "value_of_foo value_of_bar")
 
     def test_python_unexpanded(self):
         self.d.setVar("bar", "${unsetvar}")
-        val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
-        self.assertEqual(str(val), "${@d.getVar('foo', True) + ' ${unsetvar}'}")
+        val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
+        self.assertEqual(str(val), "${@d.getVar('foo') + ' ${unsetvar}'}")
 
     def test_python_snippet_syntax_error(self):
         self.d.setVar("FOO", "${@foo = 5}")
@@ -99,7 +99,7 @@ class DataExpansions(unittest.TestCase):
         self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
 
     def test_value_containing_value(self):
-        val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
+        val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
         self.assertEqual(str(val), "value_of_foo value_of_bar")
 
     def test_reference_undefined_var(self):
@@ -109,7 +109,7 @@ class DataExpansions(unittest.TestCase):
     def test_double_reference(self):
         self.d.setVar("BAR", "bar value")
         self.d.setVar("FOO", "${BAR} foo ${BAR}")
-        val = self.d.getVar("FOO", True)
+        val = self.d.getVar("FOO")
         self.assertEqual(str(val), "bar value foo bar value")
 
     def test_direct_recursion(self):
@@ -129,12 +129,12 @@ class DataExpansions(unittest.TestCase):
 
     def test_incomplete_varexp_single_quotes(self):
         self.d.setVar("FOO", "sed -i -e 's:IP{:I${:g' $pc")
-        val = self.d.getVar("FOO", True)
+        val = self.d.getVar("FOO")
         self.assertEqual(str(val), "sed -i -e 's:IP{:I${:g' $pc")
 
     def test_nonstring(self):
         self.d.setVar("TEST", 5)
-        val = self.d.getVar("TEST", True)
+        val = self.d.getVar("TEST")
         self.assertEqual(str(val), "5")
 
     def test_rename(self):
@@ -234,19 +234,19 @@ class TestConcat(unittest.TestCase):
     def test_prepend(self):
         self.d.setVar("TEST", "${VAL}")
         self.d.prependVar("TEST", "${FOO}:")
-        self.assertEqual(self.d.getVar("TEST", True), "foo:val")
+        self.assertEqual(self.d.getVar("TEST"), "foo:val")
 
     def test_append(self):
         self.d.setVar("TEST", "${VAL}")
         self.d.appendVar("TEST", ":${BAR}")
-        self.assertEqual(self.d.getVar("TEST", True), "val:bar")
+        self.assertEqual(self.d.getVar("TEST"), "val:bar")
 
     def test_multiple_append(self):
         self.d.setVar("TEST", "${VAL}")
         self.d.prependVar("TEST", "${FOO}:")
         self.d.appendVar("TEST", ":val2")
         self.d.appendVar("TEST", ":${BAR}")
-        self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")
+        self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
 
 class TestConcatOverride(unittest.TestCase):
     def setUp(self):
@@ -258,62 +258,66 @@ class TestConcatOverride(unittest.TestCase):
     def test_prepend(self):
         self.d.setVar("TEST", "${VAL}")
         self.d.setVar("TEST_prepend", "${FOO}:")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "foo:val")
+        self.assertEqual(self.d.getVar("TEST"), "foo:val")
 
     def test_append(self):
         self.d.setVar("TEST", "${VAL}")
         self.d.setVar("TEST_append", ":${BAR}")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "val:bar")
+        self.assertEqual(self.d.getVar("TEST"), "val:bar")
 
     def test_multiple_append(self):
         self.d.setVar("TEST", "${VAL}")
         self.d.setVar("TEST_prepend", "${FOO}:")
         self.d.setVar("TEST_append", ":val2")
         self.d.setVar("TEST_append", ":${BAR}")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")
+        self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
 
     def test_append_unset(self):
         self.d.setVar("TEST_prepend", "${FOO}:")
         self.d.setVar("TEST_append", ":val2")
         self.d.setVar("TEST_append", ":${BAR}")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "foo::val2:bar")
+        self.assertEqual(self.d.getVar("TEST"), "foo::val2:bar")
 
     def test_remove(self):
         self.d.setVar("TEST", "${VAL} ${BAR}")
         self.d.setVar("TEST_remove", "val")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "bar")
+        self.assertEqual(self.d.getVar("TEST"), "bar")
+
+    def test_remove_cleared(self):
+        self.d.setVar("TEST", "${VAL} ${BAR}")
+        self.d.setVar("TEST_remove", "val")
+        self.d.setVar("TEST", "${VAL} ${BAR}")
+        self.assertEqual(self.d.getVar("TEST"), "val bar")
+
+    # Ensure the value is unchanged if we have an inactive remove override
+    # (including that whitespace is preserved)
+    def test_remove_inactive_override(self):
+        self.d.setVar("TEST", "${VAL} ${BAR}    123")
+        self.d.setVar("TEST_remove_inactiveoverride", "val")
+        self.assertEqual(self.d.getVar("TEST"), "val bar    123")
 
     def test_doubleref_remove(self):
         self.d.setVar("TEST", "${VAL} ${BAR}")
         self.d.setVar("TEST_remove", "val")
         self.d.setVar("TEST_TEST", "${TEST} ${TEST}")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST_TEST", True), "bar bar")
+        self.assertEqual(self.d.getVar("TEST_TEST"), "bar bar")
 
     def test_empty_remove(self):
         self.d.setVar("TEST", "")
         self.d.setVar("TEST_remove", "val")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "")
+        self.assertEqual(self.d.getVar("TEST"), "")
 
     def test_remove_expansion(self):
         self.d.setVar("BAR", "Z")
         self.d.setVar("TEST", "${BAR}/X Y")
         self.d.setVar("TEST_remove", "${BAR}/X")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "Y")
+        self.assertEqual(self.d.getVar("TEST"), "Y")
 
     def test_remove_expansion_items(self):
         self.d.setVar("TEST", "A B C D")
         self.d.setVar("BAR", "B D")
         self.d.setVar("TEST_remove", "${BAR}")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "A C")
+        self.assertEqual(self.d.getVar("TEST"), "A C")
 
 class TestOverrides(unittest.TestCase):
     def setUp(self):
@@ -322,60 +326,53 @@ class TestOverrides(unittest.TestCase):
         self.d.setVar("TEST", "testvalue")
 
     def test_no_override(self):
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "testvalue")
+        self.assertEqual(self.d.getVar("TEST"), "testvalue")
 
     def test_one_override(self):
         self.d.setVar("TEST_bar", "testvalue2")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "testvalue2")
+        self.assertEqual(self.d.getVar("TEST"), "testvalue2")
 
     def test_one_override_unset(self):
         self.d.setVar("TEST2_bar", "testvalue2")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST2", True), "testvalue2")
+
+        self.assertEqual(self.d.getVar("TEST2"), "testvalue2")
         self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST2', 'OVERRIDES', 'TEST2_bar'])
 
     def test_multiple_override(self):
         self.d.setVar("TEST_bar", "testvalue2")
         self.d.setVar("TEST_local", "testvalue3")
         self.d.setVar("TEST_foo", "testvalue4")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")
+        self.assertEqual(self.d.getVar("TEST"), "testvalue3")
         self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST_foo', 'OVERRIDES', 'TEST_bar', 'TEST_local'])
 
     def test_multiple_combined_overrides(self):
         self.d.setVar("TEST_local_foo_bar", "testvalue3")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")
+        self.assertEqual(self.d.getVar("TEST"), "testvalue3")
 
     def test_multiple_overrides_unset(self):
         self.d.setVar("TEST2_local_foo_bar", "testvalue3")
-        bb.data.update_data(self.d)
-        self.assertEqual(self.d.getVar("TEST2", True), "testvalue3")
+        self.assertEqual(self.d.getVar("TEST2"), "testvalue3")
 
     def test_keyexpansion_override(self):
         self.d.setVar("LOCAL", "local")
         self.d.setVar("TEST_bar", "testvalue2")
         self.d.setVar("TEST_${LOCAL}", "testvalue3")
         self.d.setVar("TEST_foo", "testvalue4")
-        bb.data.update_data(self.d)
         bb.data.expandKeys(self.d)
-        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")
+        self.assertEqual(self.d.getVar("TEST"), "testvalue3")
 
     def test_rename_override(self):
         self.d.setVar("ALTERNATIVE_ncurses-tools_class-target", "a")
         self.d.setVar("OVERRIDES", "class-target")
-        bb.data.update_data(self.d)
         self.d.renameVar("ALTERNATIVE_ncurses-tools", "ALTERNATIVE_lib32-ncurses-tools")
-        self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools", True), "a")
+        self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools"), "a")
 
     def test_underscore_override(self):
         self.d.setVar("TEST_bar", "testvalue2")
         self.d.setVar("TEST_some_val", "testvalue3")
         self.d.setVar("TEST_foo", "testvalue4")
         self.d.setVar("OVERRIDES", "foo:bar:some_val")
-        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")
+        self.assertEqual(self.d.getVar("TEST"), "testvalue3")
 
 class TestKeyExpansion(unittest.TestCase):
     def setUp(self):
@@ -389,7 +386,7 @@ class TestKeyExpansion(unittest.TestCase):
         with LogRecord() as logs:
             bb.data.expandKeys(self.d)
             self.assertTrue(logContains("Variable key VAL_${FOO} (A) replaces original key VAL_foo (B)", logs))
-        self.assertEqual(self.d.getVar("VAL_foo", True), "A")
+        self.assertEqual(self.d.getVar("VAL_foo"), "A")
 
 class TestFlags(unittest.TestCase):
     def setUp(self):
@@ -444,3 +441,167 @@ class Contains(unittest.TestCase):
 
         self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x", True, False, self.d))
         self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x y z", True, False, self.d))
+
+
+class Serialize(unittest.TestCase):
+
+    def test_serialize(self):
+        import tempfile
+        import pickle
+        d = bb.data.init()
+        d.enableTracking()
+        d.setVar('HELLO', 'world')
+        d.setVarFlag('HELLO', 'other', 'planet')
+        with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
+            tmpfilename = tmpfile.name
+            pickle.dump(d, tmpfile)
+
+        with open(tmpfilename, 'rb') as f:
+            newd = pickle.load(f)
+
+        os.remove(tmpfilename)
+
+        self.assertEqual(d, newd)
+        self.assertEqual(newd.getVar('HELLO'), 'world')
+        self.assertEqual(newd.getVarFlag('HELLO', 'other'), 'planet')
+
+
+# Remote datastore tests
+# These only exercise the interface; in actual usage a tinfoil connector
+# does everything over RPC, and that path is not tested here.
+
+class TestConnector:
+    d = None
+    def __init__(self, d):
+        self.d = d
+    def getVar(self, name):
+        return self.d._findVar(name)
+    def getKeys(self):
+        return set(self.d.keys())
+    def getVarHistory(self, name):
+        return self.d.varhistory.variable(name)
+    def expandPythonRef(self, varname, expr, d):
+        localdata = self.d.createCopy()
+        for key in d.localkeys():
+            localdata.setVar(key, d.getVar(key))
+        varparse = bb.data_smart.VariableParse(varname, localdata)
+        return varparse.python_sub(expr)
+    def setVar(self, name, value):
+        self.d.setVar(name, value)
+    def setVarFlag(self, name, flag, value):
+        self.d.setVarFlag(name, flag, value)
+    def delVar(self, name):
+        self.d.delVar(name)
+        return False
+    def delVarFlag(self, name, flag):
+        self.d.delVarFlag(name, flag)
+        return False
+    def renameVar(self, name, newname):
+        self.d.renameVar(name, newname)
+        return False
+
+class Remote(unittest.TestCase):
+    def test_remote(self):
+
+        d1 = bb.data.init()
+        d1.enableTracking()
+        d2 = bb.data.init()
+        d2.enableTracking()
+        connector = TestConnector(d1)
+
+        d2.setVar('_remote_data', connector)
+
+        d1.setVar('HELLO', 'world')
+        d1.setVarFlag('OTHER', 'flagname', 'flagvalue')
+        self.assertEqual(d2.getVar('HELLO'), 'world')
+        self.assertEqual(d2.expand('${HELLO}'), 'world')
+        self.assertEqual(d2.expand('${@d.getVar("HELLO")}'), 'world')
+        self.assertIn('flagname', d2.getVarFlags('OTHER'))
+        self.assertEqual(d2.getVarFlag('OTHER', 'flagname'), 'flagvalue')
+        self.assertEqual(d1.varhistory.variable('HELLO'), d2.varhistory.variable('HELLO'))
+        # Test setVar on client side affects server
+        d2.setVar('HELLO', 'other-world')
+        self.assertEqual(d1.getVar('HELLO'), 'other-world')
+        # Test setVarFlag on client side affects server
+        d2.setVarFlag('HELLO', 'flagname', 'flagvalue')
+        self.assertEqual(d1.getVarFlag('HELLO', 'flagname'), 'flagvalue')
+        # Test client side data is incorporated in python expansion (which is done on server)
+        d2.setVar('FOO', 'bar')
+        self.assertEqual(d2.expand('${@d.getVar("FOO")}'), 'bar')
+        # Test overrides work
+        d1.setVar('FOO_test', 'baz')
+        d1.appendVar('OVERRIDES', ':test')
+        self.assertEqual(d2.getVar('FOO'), 'baz')
+
+
+# Remote equivalents of local test classes
+# Note that these aren't perfect since we only test in one direction
+
+class RemoteDataExpansions(DataExpansions):
+    def setUp(self):
+        self.d1 = bb.data.init()
+        self.d = bb.data.init()
+        self.d1["foo"] = "value_of_foo"
+        self.d1["bar"] = "value_of_bar"
+        self.d1["value_of_foo"] = "value_of_'value_of_foo'"
+        connector = TestConnector(self.d1)
+        self.d.setVar('_remote_data', connector)
+
+class TestRemoteNestedExpansions(TestNestedExpansions):
+    def setUp(self):
+        self.d1 = bb.data.init()
+        self.d = bb.data.init()
+        self.d1["foo"] = "foo"
+        self.d1["bar"] = "bar"
+        self.d1["value_of_foobar"] = "187"
+        connector = TestConnector(self.d1)
+        self.d.setVar('_remote_data', connector)
+
+class TestRemoteConcat(TestConcat):
+    def setUp(self):
+        self.d1 = bb.data.init()
+        self.d = bb.data.init()
+        self.d1.setVar("FOO", "foo")
+        self.d1.setVar("VAL", "val")
+        self.d1.setVar("BAR", "bar")
+        connector = TestConnector(self.d1)
+        self.d.setVar('_remote_data', connector)
+
+class TestRemoteConcatOverride(TestConcatOverride):
+    def setUp(self):
+        self.d1 = bb.data.init()
+        self.d = bb.data.init()
+        self.d1.setVar("FOO", "foo")
+        self.d1.setVar("VAL", "val")
+        self.d1.setVar("BAR", "bar")
+        connector = TestConnector(self.d1)
+        self.d.setVar('_remote_data', connector)
+
+class TestRemoteOverrides(TestOverrides):
+    def setUp(self):
+        self.d1 = bb.data.init()
+        self.d = bb.data.init()
+        self.d1.setVar("OVERRIDES", "foo:bar:local")
+        self.d1.setVar("TEST", "testvalue")
+        connector = TestConnector(self.d1)
+        self.d.setVar('_remote_data', connector)
+
+class TestRemoteKeyExpansion(TestKeyExpansion):
+    def setUp(self):
+        self.d1 = bb.data.init()
+        self.d = bb.data.init()
+        self.d1.setVar("FOO", "foo")
+        self.d1.setVar("BAR", "foo")
+        connector = TestConnector(self.d1)
+        self.d.setVar('_remote_data', connector)
+
+class TestRemoteFlags(TestFlags):
+    def setUp(self):
+        self.d1 = bb.data.init()
+        self.d = bb.data.init()
+        self.d1.setVar("foo", "value of foo")
+        self.d1.setVarFlag("foo", "flag1", "value of flag1")
+        self.d1.setVarFlag("foo", "flag2", "value of flag2")
+        connector = TestConnector(self.d1)
+        self.d.setVar('_remote_data', connector)
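The TestConcatOverride changes above drop the bb.data.update_data() calls because _append/_prepend/_remove are now applied when the variable is read. A deliberately simplified plain-Python model of that evaluation order (real BitBake also handles overrides, expansion, and whitespace preservation):

    def apply_varops(base, prepends=(), appends=(), removes=()):
        val = "".join(prepends) + base + "".join(appends)
        if removes:
            # Drop matching words; an empty removes set leaves the value untouched
            val = " ".join(w for w in val.split() if w not in removes)
        return val

    print(apply_varops("val bar", removes={"val"}))                  # bar
    print(apply_varops("val", prepends=["foo:"], appends=[":bar"]))  # foo:val:bar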

+ 986 - 0
bitbake/lib/bb/tests/event.py

@@ -0,0 +1,986 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# BitBake Tests for the Event implementation (event.py)
+#
+# Copyright (C) 2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import unittest
+import bb
+import logging
+import bb.compat
+import bb.event
+import importlib
+import threading
+import time
+import pickle
+from unittest.mock import Mock
+from unittest.mock import call
+from bb.msg import BBLogFormatter
+
+
+class EventQueueStubBase(object):
+    """ Base class for EventQueueStub classes """
+    def __init__(self):
+        self.event_calls = []
+        return
+
+    def _store_event_data_string(self, event):
+        if isinstance(event, logging.LogRecord):
+            formatter = BBLogFormatter("%(levelname)s: %(message)s")
+            self.event_calls.append(formatter.format(event))
+        else:
+            self.event_calls.append(bb.event.getName(event))
+        return
+
+
+class EventQueueStub(EventQueueStubBase):
+    """ Class used as specification for UI event handler queue stub objects """
+    def __init__(self):
+        super(EventQueueStub, self).__init__()
+
+    def send(self, event):
+        super(EventQueueStub, self)._store_event_data_string(event)
+
+
+class PickleEventQueueStub(EventQueueStubBase):
+    """ Class used as specification for UI event handler queue stub objects
+        with sendpickle method """
+    def __init__(self):
+        super(PickleEventQueueStub, self).__init__()
+
+    def sendpickle(self, pickled_event):
+        event = pickle.loads(pickled_event)
+        super(PickleEventQueueStub, self)._store_event_data_string(event)
+
+
+class UIClientStub(object):
+    """ Class used as specification for UI event handler stub objects """
+    def __init__(self):
+        self.event = None
+
+
+class EventHandlingTest(unittest.TestCase):
+    """ Event handling test class """
+
+
+    def setUp(self):
+        self._test_process = Mock()
+        ui_client1 = UIClientStub()
+        ui_client2 = UIClientStub()
+        self._test_ui1 = Mock(wraps=ui_client1)
+        self._test_ui2 = Mock(wraps=ui_client2)
+        importlib.reload(bb.event)
+
+    def _create_test_handlers(self):
+        """ Method used to create a test handler ordered dictionary """
+        test_handlers = bb.compat.OrderedDict()
+        test_handlers["handler1"] = self._test_process.handler1
+        test_handlers["handler2"] = self._test_process.handler2
+        return test_handlers
+
+    def test_class_handlers(self):
+        """ Test set_class_handlers and get_class_handlers methods """
+        test_handlers = self._create_test_handlers()
+        bb.event.set_class_handlers(test_handlers)
+        self.assertEqual(test_handlers,
+                         bb.event.get_class_handlers())
+
+    def test_handlers(self):
+        """ Test set_handlers and get_handlers """
+        test_handlers = self._create_test_handlers()
+        bb.event.set_handlers(test_handlers)
+        self.assertEqual(test_handlers,
+                         bb.event.get_handlers())
+
+    def test_clean_class_handlers(self):
+        """ Test clean_class_handlers method """
+        cleanDict = bb.compat.OrderedDict()
+        self.assertEqual(cleanDict,
+                         bb.event.clean_class_handlers())
+
+    def test_register(self):
+        """ Test register method for class handlers """
+        result = bb.event.register("handler", self._test_process.handler)
+        self.assertEqual(result, bb.event.Registered)
+        handlers_dict = bb.event.get_class_handlers()
+        self.assertIn("handler", handlers_dict)
+
+    def test_already_registered(self):
+        """ Test detection of an already registed class handler """
+        bb.event.register("handler", self._test_process.handler)
+        handlers_dict = bb.event.get_class_handlers()
+        self.assertIn("handler", handlers_dict)
+        result = bb.event.register("handler", self._test_process.handler)
+        self.assertEqual(result, bb.event.AlreadyRegistered)
+
+    def test_register_from_string(self):
+        """ Test register method receiving code in string """
+        result = bb.event.register("string_handler", "    return True")
+        self.assertEqual(result, bb.event.Registered)
+        handlers_dict = bb.event.get_class_handlers()
+        self.assertIn("string_handler", handlers_dict)
+
+    def test_register_with_mask(self):
+        """ Test register method with event masking """
+        mask = ["bb.event.OperationStarted",
+                "bb.event.OperationCompleted"]
+        result = bb.event.register("event_handler",
+                                   self._test_process.event_handler,
+                                   mask)
+        self.assertEqual(result, bb.event.Registered)
+        handlers_dict = bb.event.get_class_handlers()
+        self.assertIn("event_handler", handlers_dict)
+
+    def test_remove(self):
+        """ Test remove method for class handlers """
+        test_handlers = self._create_test_handlers()
+        bb.event.set_class_handlers(test_handlers)
+        count = len(test_handlers)
+        bb.event.remove("handler1", None)
+        test_handlers = bb.event.get_class_handlers()
+        self.assertEqual(len(test_handlers), count - 1)
+        with self.assertRaises(KeyError):
+            bb.event.remove("handler1", None)
+
+    def test_execute_handler(self):
+        """ Test execute_handler method for class handlers """
+        mask = ["bb.event.OperationProgress"]
+        result = bb.event.register("event_handler",
+                                   self._test_process.event_handler,
+                                   mask)
+        self.assertEqual(result, bb.event.Registered)
+        event = bb.event.OperationProgress(current=10, total=100)
+        bb.event.execute_handler("event_handler",
+                                 self._test_process.event_handler,
+                                 event,
+                                 None)
+        self._test_process.event_handler.assert_called_once_with(event)
+
+    def test_fire_class_handlers(self):
+        """ Test fire_class_handlers method """
+        mask = ["bb.event.OperationStarted"]
+        result = bb.event.register("event_handler1",
+                                   self._test_process.event_handler1,
+                                   mask)
+        self.assertEqual(result, bb.event.Registered)
+        result = bb.event.register("event_handler2",
+                                   self._test_process.event_handler2,
+                                   "*")
+        self.assertEqual(result, bb.event.Registered)
+        event1 = bb.event.OperationStarted()
+        event2 = bb.event.OperationCompleted(total=123)
+        bb.event.fire_class_handlers(event1, None)
+        bb.event.fire_class_handlers(event2, None)
+        bb.event.fire_class_handlers(event2, None)
+        expected_event_handler1 = [call(event1)]
+        expected_event_handler2 = [call(event1),
+                                   call(event2),
+                                   call(event2)]
+        self.assertEqual(self._test_process.event_handler1.call_args_list,
+                         expected_event_handler1)
+        self.assertEqual(self._test_process.event_handler2.call_args_list,
+                         expected_event_handler2)
+
+    def test_class_handler_filters(self):
+        """ Test filters for class handlers """
+        mask = ["bb.event.OperationStarted"]
+        result = bb.event.register("event_handler1",
+                                   self._test_process.event_handler1,
+                                   mask)
+        self.assertEqual(result, bb.event.Registered)
+        result = bb.event.register("event_handler2",
+                                   self._test_process.event_handler2,
+                                   "*")
+        self.assertEqual(result, bb.event.Registered)
+        bb.event.set_eventfilter(
+            lambda name, handler, event, d:
+            name == 'event_handler2' and
+            bb.event.getName(event) == "OperationStarted")
+        event1 = bb.event.OperationStarted()
+        event2 = bb.event.OperationCompleted(total=123)
+        bb.event.fire_class_handlers(event1, None)
+        bb.event.fire_class_handlers(event2, None)
+        bb.event.fire_class_handlers(event2, None)
+        expected_event_handler1 = []
+        expected_event_handler2 = [call(event1)]
+        self.assertEqual(self._test_process.event_handler1.call_args_list,
+                         expected_event_handler1)
+        self.assertEqual(self._test_process.event_handler2.call_args_list,
+                         expected_event_handler2)
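+    # The filter installed through set_eventfilter is a single callable with
+    # the signature (name, handler, event, d); a handler only runs when the
+    # filter returns True, so event_handler1 above never fires and
+    # event_handler2 only sees the OperationStarted event.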
+
+    def test_change_handler_event_mapping(self):
+        """ Test changing the event mapping for class handlers """
+        event1 = bb.event.OperationStarted()
+        event2 = bb.event.OperationCompleted(total=123)
+
+        # register handler for all events
+        result = bb.event.register("event_handler1",
+                                   self._test_process.event_handler1,
+                                   "*")
+        self.assertEqual(result, bb.event.Registered)
+        bb.event.fire_class_handlers(event1, None)
+        bb.event.fire_class_handlers(event2, None)
+        expected = [call(event1), call(event2)]
+        self.assertEqual(self._test_process.event_handler1.call_args_list,
+                         expected)
+
+        # unregister handler and register it only for OperationStarted
+        bb.event.remove("event_handler1",
+                        self._test_process.event_handler1)
+        mask = ["bb.event.OperationStarted"]
+        result = bb.event.register("event_handler1",
+                                   self._test_process.event_handler1,
+                                   mask)
+        self.assertEqual(result, bb.event.Registered)
+        bb.event.fire_class_handlers(event1, None)
+        bb.event.fire_class_handlers(event2, None)
+        expected = [call(event1), call(event2), call(event1)]
+        self.assertEqual(self._test_process.event_handler1.call_args_list,
+                         expected)
+
+        # unregister handler and register it only for OperationCompleted
+        bb.event.remove("event_handler1",
+                        self._test_process.event_handler1)
+        mask = ["bb.event.OperationCompleted"]
+        result = bb.event.register("event_handler1",
+                                   self._test_process.event_handler1,
+                                   mask)
+        self.assertEqual(result, bb.event.Registered)
+        bb.event.fire_class_handlers(event1, None)
+        bb.event.fire_class_handlers(event2, None)
+        expected = [call(event1), call(event2), call(event1), call(event2)]
+        self.assertEqual(self._test_process.event_handler1.call_args_list,
+                         expected)
+
+    def test_register_UIHhandler(self):
+        """ Test register_UIHhandler method """
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        self.assertEqual(result, 1)
+
+    def test_UIHhandler_already_registered(self):
+        """ Test registering an UIHhandler already existing """
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        self.assertEqual(result, 1)
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        self.assertEqual(result, 2)
+
+    def test_unregister_UIHhandler(self):
+        """ Test unregister_UIHhandler method """
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        self.assertEqual(result, 1)
+        result = bb.event.unregister_UIHhandler(1)
+        self.assertIs(result, None)
+
+    def test_fire_ui_handlers(self):
+        """ Test fire_ui_handlers method """
+        self._test_ui1.event = Mock(spec_set=EventQueueStub)
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        self.assertEqual(result, 1)
+        self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
+        result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
+        self.assertEqual(result, 2)
+        event1 = bb.event.OperationStarted()
+        bb.event.fire_ui_handlers(event1, None)
+        expected = [call(event1)]
+        self.assertEqual(self._test_ui1.event.send.call_args_list,
+                         expected)
+        expected = [call(pickle.dumps(event1))]
+        self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
+                         expected)
+
+    def test_ui_handler_mask_filter(self):
+        """ Test filters for UI handlers """
+        mask = ["bb.event.OperationStarted"]
+        debug_domains = {}
+        self._test_ui1.event = Mock(spec_set=EventQueueStub)
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)
+        self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
+        result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
+        bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)
+
+        event1 = bb.event.OperationStarted()
+        event2 = bb.event.OperationCompleted(total=1)
+
+        bb.event.fire_ui_handlers(event1, None)
+        bb.event.fire_ui_handlers(event2, None)
+        expected = [call(event1)]
+        self.assertEqual(self._test_ui1.event.send.call_args_list,
+                         expected)
+        expected = [call(pickle.dumps(event1))]
+        self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
+                         expected)
+
+    def test_ui_handler_log_filter(self):
+        """ Test log filters for UI handlers """
+        mask = ["*"]
+        debug_domains = {'BitBake.Foo': logging.WARNING}
+
+        self._test_ui1.event = EventQueueStub()
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)
+        self._test_ui2.event = PickleEventQueueStub()
+        result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
+        bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)
+
+        event1 = bb.event.OperationStarted()
+        bb.event.fire_ui_handlers(event1, None)   # All events match
+
+        event_log_handler = bb.event.LogHandler()
+        logger = logging.getLogger("BitBake")
+        logger.addHandler(event_log_handler)
+        logger1 = logging.getLogger("BitBake.Foo")
+        logger1.warning("Test warning LogRecord1") # Matches debug_domains level
+        logger1.info("Test info LogRecord")        # Filtered out
+        logger2 = logging.getLogger("BitBake.Bar")
+        logger2.error("Test error LogRecord")      # Matches filter base level
+        logger2.warning("Test warning LogRecord2") # Filtered out
+        logger.removeHandler(event_log_handler)
+
+        expected = ['OperationStarted',
+                    'WARNING: Test warning LogRecord1',
+                    'ERROR: Test error LogRecord']
+        self.assertEqual(self._test_ui1.event.event_calls, expected)
+        self.assertEqual(self._test_ui2.event.event_calls, expected)
+
+    def test_fire(self):
+        """ Test fire method used to trigger class and ui event handlers """
+        mask = ["bb.event.ConfigParsed"]
+        result = bb.event.register("event_handler1",
+                                   self._test_process.event_handler1,
+                                   mask)
+
+        self._test_ui1.event = Mock(spec_set=EventQueueStub)
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        self.assertEqual(result, 1)
+
+        event1 = bb.event.ConfigParsed()
+        bb.event.fire(event1, None)
+        expected = [call(event1)]
+        self.assertEqual(self._test_process.event_handler1.call_args_list,
+                         expected)
+        self.assertEqual(self._test_ui1.event.send.call_args_list,
+                         expected)
+
+    def test_fire_from_worker(self):
+        """ Test fire_from_worker method """
+        self._test_ui1.event = Mock(spec_set=EventQueueStub)
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        self.assertEqual(result, 1)
+        event1 = bb.event.ConfigParsed()
+        bb.event.fire_from_worker(event1, None)
+        expected = [call(event1)]
+        self.assertEqual(self._test_ui1.event.send.call_args_list,
+                         expected)
+
+    def test_worker_fire(self):
+        """ Test the triggering of bb.event.worker_fire callback """
+        bb.event.worker_fire = Mock()
+        event = bb.event.Event()
+        bb.event.fire(event, None)
+        expected = [call(event, None)]
+        self.assertEqual(bb.event.worker_fire.call_args_list, expected)
+
+    def test_print_ui_queue(self):
+        """ Test print_ui_queue method """
+        event1 = bb.event.OperationStarted()
+        event2 = bb.event.OperationCompleted(total=123)
+        bb.event.fire(event1, None)
+        bb.event.fire(event2, None)
+        event_log_handler = bb.event.LogHandler()
+        logger = logging.getLogger("BitBake")
+        logger.addHandler(event_log_handler)
+        logger.info("Test info LogRecord")
+        logger.warning("Test warning LogRecord")
+        with self.assertLogs("BitBake", level="INFO") as cm:
+            bb.event.print_ui_queue()
+        logger.removeHandler(event_log_handler)
+        self.assertEqual(cm.output,
+                         ["INFO:BitBake:Test info LogRecord",
+                          "WARNING:BitBake:Test warning LogRecord"])
+
+    def _set_threadlock_test_mockups(self):
+        """ Create UI event handler mockups used in enable and disable
+            threadlock tests """
+        def ui1_event_send(event):
+            if type(event) is bb.event.ConfigParsed:
+                self._threadlock_test_calls.append("w1_ui1")
+            if type(event) is bb.event.OperationStarted:
+                self._threadlock_test_calls.append("w2_ui1")
+            time.sleep(2)
+
+        def ui2_event_send(event):
+            if type(event) is bb.event.ConfigParsed:
+                self._threadlock_test_calls.append("w1_ui2")
+            if type(event) is bb.event.OperationStarted:
+                self._threadlock_test_calls.append("w2_ui2")
+            time.sleep(2)
+
+        self._threadlock_test_calls = []
+        self._test_ui1.event = EventQueueStub()
+        self._test_ui1.event.send = ui1_event_send
+        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
+        self.assertEqual(result, 1)
+        self._test_ui2.event = EventQueueStub()
+        self._test_ui2.event.send = ui2_event_send
+        result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
+        self.assertEqual(result, 2)
+
+    def _set_and_run_threadlock_test_workers(self):
+        """ Create and run the workers used to trigger events in enable and
+            disable threadlock tests """
+        worker1 = threading.Thread(target=self._thread_lock_test_worker1)
+        worker2 = threading.Thread(target=self._thread_lock_test_worker2)
+        worker1.start()
+        time.sleep(1)
+        worker2.start()
+        worker1.join()
+        worker2.join()
+
+    def _thread_lock_test_worker1(self):
+        """ First worker used to fire the ConfigParsed event for enable and
+            disable threadlocks tests """
+        bb.event.fire(bb.event.ConfigParsed(), None)
+
+    def _thread_lock_test_worker2(self):
+        """ Second worker used to fire the OperationStarted event for enable
+            and disable threadlocks tests """
+        bb.event.fire(bb.event.OperationStarted(), None)
+
+    def test_enable_threadlock(self):
+        """ Test enable_threadlock method """
+        self._set_threadlock_test_mockups()
+        bb.event.enable_threadlock()
+        self._set_and_run_threadlock_test_workers()
+        # Calls to UI handlers should arrive in order: with the lock held,
+        # every registered handler processes the event from the first worker
+        # before the event from the second worker is handled.
+        self.assertEqual(self._threadlock_test_calls,
+                         ["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"])
+
+    def test_disable_threadlock(self):
+        """ Test disable_threadlock method """
+        self._set_threadlock_test_mockups()
+        bb.event.disable_threadlock()
+        self._set_and_run_threadlock_test_workers()
+        # Calls to UI handlers should be interleaved. Thanks to the delay in
+        # the registered handlers for the event coming from the first worker,
+        # the event coming from the second worker starts being processed
+        # before handling of the first worker's event has finished.
+        self.assertEqual(self._threadlock_test_calls,
+                         ["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"])
+
+
+class EventClassesTest(unittest.TestCase):
+    """ Event classes test class """
+
+    _worker_pid = 54321
+
+    def setUp(self):
+        bb.event.worker_pid = EventClassesTest._worker_pid
+
+    def test_Event(self):
+        """ Test the Event base class """
+        event = bb.event.Event()
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_HeartbeatEvent(self):
+        """ Test the HeartbeatEvent class """
+        event_time = 10  # local name avoids shadowing the imported time module
+        event = bb.event.HeartbeatEvent(event_time)
+        self.assertEqual(event.time, event_time)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_OperationStarted(self):
+        """ Test OperationStarted event class """
+        msg = "Foo Bar"
+        event = bb.event.OperationStarted(msg)
+        self.assertEqual(event.msg, msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_OperationCompleted(self):
+        """ Test OperationCompleted event class """
+        msg = "Foo Bar"
+        total = 123
+        event = bb.event.OperationCompleted(total, msg)
+        self.assertEqual(event.msg, msg)
+        self.assertEqual(event.total, total)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_OperationProgress(self):
+        """ Test OperationProgress event class """
+        msg = "Foo Bar"
+        total = 123
+        current = 111
+        event = bb.event.OperationProgress(current, total, msg)
+        self.assertEqual(event.msg, msg + ": %s/%s" % (current, total))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ConfigParsed(self):
+        """ Test the ConfigParsed class """
+        event = bb.event.ConfigParsed()
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_MultiConfigParsed(self):
+        """ Test MultiConfigParsed event class """
+        mcdata = {"foobar": "Foo Bar"}
+        event = bb.event.MultiConfigParsed(mcdata)
+        self.assertEqual(event.mcdata, mcdata)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_RecipeEvent(self):
+        """ Test RecipeEvent event base class """
+        callback = lambda a: 2 * a
+        event = bb.event.RecipeEvent(callback)
+        self.assertEqual(event.fn(1), callback(1))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_RecipePreFinalise(self):
+        """ Test RecipePreFinalise event class """
+        callback = lambda a: 2 * a
+        event = bb.event.RecipePreFinalise(callback)
+        self.assertEqual(event.fn(1), callback(1))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_RecipeTaskPreProcess(self):
+        """ Test RecipeTaskPreProcess event class """
+        callback = lambda a: 2 * a
+        tasklist = [("foobar", callback)]
+        event = bb.event.RecipeTaskPreProcess(callback, tasklist)
+        self.assertEqual(event.fn(1), callback(1))
+        self.assertEqual(event.tasklist, tasklist)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_RecipeParsed(self):
+        """ Test RecipeParsed event base class """
+        callback = lambda a: 2 * a
+        event = bb.event.RecipeParsed(callback)
+        self.assertEqual(event.fn(1), callback(1))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_StampUpdate(self):
+        targets = ["foo", "bar"]
+        stampfns = [lambda:"foobar"]
+        event = bb.event.StampUpdate(targets, stampfns)
+        self.assertEqual(event.targets, targets)
+        self.assertEqual(event.stampPrefix, stampfns)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_BuildBase(self):
+        """ Test base class for bitbake build events """
+        name = "foo"
+        pkgs = ["bar"]
+        failures = 123
+        event = bb.event.BuildBase(name, pkgs, failures)
+        self.assertEqual(event.name, name)
+        self.assertEqual(event.pkgs, pkgs)
+        self.assertEqual(event.getFailures(), failures)
+        name = event.name = "bar"
+        pkgs = event.pkgs = ["foo"]
+        self.assertEqual(event.name, name)
+        self.assertEqual(event.pkgs, pkgs)
+        self.assertEqual(event.getFailures(), failures)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_BuildInit(self):
+        """ Test class for bitbake build invocation events """
+        event = bb.event.BuildInit()
+        self.assertEqual(event.name, None)
+        self.assertEqual(event.pkgs, [])
+        self.assertEqual(event.getFailures(), 0)
+        name = event.name = "bar"
+        pkgs = event.pkgs = ["foo"]
+        self.assertEqual(event.name, name)
+        self.assertEqual(event.pkgs, pkgs)
+        self.assertEqual(event.getFailures(), 0)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_BuildStarted(self):
+        """ Test class for build started events """
+        name = "foo"
+        pkgs = ["bar"]
+        failures = 123
+        event = bb.event.BuildStarted(name, pkgs, failures)
+        self.assertEqual(event.name, name)
+        self.assertEqual(event.pkgs, pkgs)
+        self.assertEqual(event.getFailures(), failures)
+        self.assertEqual(event.msg, "Building Started")
+        name = event.name = "bar"
+        pkgs = event.pkgs = ["foo"]
+        msg = event.msg = "foobar"
+        self.assertEqual(event.name, name)
+        self.assertEqual(event.pkgs, pkgs)
+        self.assertEqual(event.getFailures(), failures)
+        self.assertEqual(event.msg, msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_BuildCompleted(self):
+        """ Test class for build completed events """
+        total = 1000
+        name = "foo"
+        pkgs = ["bar"]
+        failures = 123
+        interrupted = 1
+        event = bb.event.BuildCompleted(total, name, pkgs, failures,
+                                        interrupted)
+        self.assertEqual(event.name, name)
+        self.assertEqual(event.pkgs, pkgs)
+        self.assertEqual(event.getFailures(), failures)
+        self.assertEqual(event.msg, "Building Failed")
+        event2 = bb.event.BuildCompleted(total, name, pkgs)
+        self.assertEqual(event2.name, name)
+        self.assertEqual(event2.pkgs, pkgs)
+        self.assertEqual(event2.getFailures(), 0)
+        self.assertEqual(event2.msg, "Building Succeeded")
+        self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+
+    def test_DiskFull(self):
+        """ Test DiskFull event class """
+        dev = "/dev/foo"
+        type = "ext4"
+        freespace = "104M"
+        mountpoint = "/"
+        event = bb.event.DiskFull(dev, type, freespace, mountpoint)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_MonitorDiskEvent(self):
+        """ Test MonitorDiskEvent class """
+        available_bytes = 10000000
+        free_bytes = 90000000
+        total_bytes = 1000000000
+        du = bb.event.DiskUsageSample(available_bytes, free_bytes,
+                                      total_bytes)
+        event = bb.event.MonitorDiskEvent(du)
+        self.assertEqual(event.disk_usage.available_bytes, available_bytes)
+        self.assertEqual(event.disk_usage.free_bytes, free_bytes)
+        self.assertEqual(event.disk_usage.total_bytes, total_bytes)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_NoProvider(self):
+        """ Test NoProvider event class """
+        item = "foobar"
+        event1 = bb.event.NoProvider(item)
+        self.assertEqual(event1.getItem(), item)
+        self.assertEqual(event1.isRuntime(), False)
+        self.assertEqual(str(event1), "Nothing PROVIDES 'foobar'")
+        runtime = True
+        dependees = ["foo", "bar"]
+        reasons = None
+        close_matches = ["foibar", "footbar"]
+        event2 = bb.event.NoProvider(item, runtime, dependees, reasons,
+                                     close_matches)
+        self.assertEqual(event2.isRuntime(), True)
+        expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
+                    " on or otherwise requires it). Close matches:\n"
+                    "  foibar\n"
+                    "  footbar")
+        self.assertEqual(str(event2), expected)
+        reasons = ["Item does not exist on database"]
+        close_matches = ["foibar", "footbar"]
+        event3 = bb.event.NoProvider(item, runtime, dependees, reasons,
+                                     close_matches)
+        expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
+                    " on or otherwise requires it)\n"
+                    "Item does not exist on database")
+        self.assertEqual(str(event3), expected)
+        self.assertEqual(event3.pid, EventClassesTest._worker_pid)
+
+    def test_MultipleProviders(self):
+        """ Test MultipleProviders event class """
+        item = "foobar"
+        candidates = ["foobarv1", "foobars"]
+        event1 = bb.event.MultipleProviders(item, candidates)
+        self.assertEqual(event1.isRuntime(), False)
+        self.assertEqual(event1.getItem(), item)
+        self.assertEqual(event1.getCandidates(), candidates)
+        expected = ("Multiple providers are available for foobar (foobarv1,"
+                    " foobars)\n"
+                    "Consider defining a PREFERRED_PROVIDER entry to match "
+                    "foobar")
+        self.assertEqual(str(event1), expected)
+        runtime = True
+        event2 = bb.event.MultipleProviders(item, candidates, runtime)
+        self.assertEqual(event2.isRuntime(), runtime)
+        expected = ("Multiple providers are available for runtime foobar "
+                    "(foobarv1, foobars)\n"
+                    "Consider defining a PREFERRED_RPROVIDER entry to match "
+                    "foobar")
+        self.assertEqual(str(event2), expected)
+        self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+
+    def test_ParseStarted(self):
+        """ Test ParseStarted event class """
+        total = 123
+        event = bb.event.ParseStarted(total)
+        self.assertEqual(event.msg, "Recipe parsing Started")
+        self.assertEqual(event.total, total)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ParseCompleted(self):
+        """ Test ParseCompleted event class """
+        cached = 10
+        parsed = 13
+        skipped = 7
+        virtuals = 2
+        masked = 1
+        errors = 0
+        total = 23
+        event = bb.event.ParseCompleted(cached, parsed, skipped, masked,
+                                        virtuals, errors, total)
+        self.assertEqual(event.msg, "Recipe parsing Completed")
+        expected = [cached, parsed, skipped, virtuals, masked, errors,
+                    cached + parsed, total]
+        actual = [event.cached, event.parsed, event.skipped, event.virtuals,
+                  event.masked, event.errors, event.sofar, event.total]
+        self.assertEqual(str(actual), str(expected))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ParseProgress(self):
+        """ Test ParseProgress event class """
+        current = 10
+        total = 100
+        event = bb.event.ParseProgress(current, total)
+        self.assertEqual(event.msg,
+                         "Recipe parsing" + ": %s/%s" % (current, total))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_CacheLoadStarted(self):
+        """ Test CacheLoadStarted event class """
+        total = 123
+        event = bb.event.CacheLoadStarted(total)
+        self.assertEqual(event.msg, "Loading cache Started")
+        self.assertEqual(event.total, total)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_CacheLoadProgress(self):
+        """ Test CacheLoadProgress event class """
+        current = 10
+        total = 100
+        event = bb.event.CacheLoadProgress(current, total)
+        self.assertEqual(event.msg,
+                         "Loading cache" + ": %s/%s" % (current, total))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_CacheLoadCompleted(self):
+        """ Test CacheLoadCompleted event class """
+        total = 23
+        num_entries = 12
+        event = bb.event.CacheLoadCompleted(total, num_entries)
+        self.assertEqual(event.msg, "Loading cache Completed")
+        expected = [total, num_entries]
+        actual = [event.total, event.num_entries]
+        self.assertEqual(str(actual), str(expected))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_TreeDataPreparationStarted(self):
+        """ Test TreeDataPreparationStarted event class """
+        event = bb.event.TreeDataPreparationStarted()
+        self.assertEqual(event.msg, "Preparing tree data Started")
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_TreeDataPreparationProgress(self):
+        """ Test TreeDataPreparationProgress event class """
+        current = 10
+        total = 100
+        event = bb.event.TreeDataPreparationProgress(current, total)
+        self.assertEqual(event.msg,
+                         "Preparing tree data" + ": %s/%s" % (current, total))
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_TreeDataPreparationCompleted(self):
+        """ Test TreeDataPreparationCompleted event class """
+        total = 23
+        event = bb.event.TreeDataPreparationCompleted(total)
+        self.assertEqual(event.msg, "Preparing tree data Completed")
+        self.assertEqual(event.total, total)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_DepTreeGenerated(self):
+        """ Test DepTreeGenerated event class """
+        depgraph = Mock()
+        event = bb.event.DepTreeGenerated(depgraph)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_TargetsTreeGenerated(self):
+        """ Test TargetsTreeGenerated event class """
+        model = Mock()
+        event = bb.event.TargetsTreeGenerated(model)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ReachableStamps(self):
+        """ Test ReachableStamps event class """
+        stamps = [Mock(), Mock()]
+        event = bb.event.ReachableStamps(stamps)
+        self.assertEqual(event.stamps, stamps)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_FilesMatchingFound(self):
+        """ Test FilesMatchingFound event class """
+        pattern = "foo.*bar"
+        matches = ["foobar"]
+        event = bb.event.FilesMatchingFound(pattern, matches)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ConfigFilesFound(self):
+        """ Test ConfigFilesFound event class """
+        variable = "FOO_BAR"
+        values = ["foo", "bar"]
+        event = bb.event.ConfigFilesFound(variable, values)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ConfigFilePathFound(self):
+        """ Test ConfigFilePathFound event class """
+        path = "/foo/bar"
+        event = bb.event.ConfigFilePathFound(path)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_message_classes(self):
+        """ Test message event classes """
+        msg = "foobar foo bar"
+        event = bb.event.MsgBase(msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+        event = bb.event.MsgDebug(msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+        event = bb.event.MsgNote(msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+        event = bb.event.MsgWarn(msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+        event = bb.event.MsgError(msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+        event = bb.event.MsgFatal(msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+        event = bb.event.MsgPlain(msg)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_LogExecTTY(self):
+        """ Test LogExecTTY event class """
+        msg = "foo bar"
+        prog = "foo.sh"
+        sleep_delay = 10
+        retries = 3
+        event = bb.event.LogExecTTY(msg, prog, sleep_delay, retries)
+        self.assertEqual(event.msg, msg)
+        self.assertEqual(event.prog, prog)
+        self.assertEqual(event.sleep_delay, sleep_delay)
+        self.assertEqual(event.retries, retries)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def _throw_zero_division_exception(self):
+        a = 1 / 0
+        return
+
+    def _worker_handler(self, event, d):
+        self._returned_event = event
+        return
+
+    def test_LogHandler(self):
+        """ Test LogHandler class """
+        logger = logging.getLogger("TestEventClasses")
+        logger.propagate = False
+        handler = bb.event.LogHandler(logging.INFO)
+        logger.addHandler(handler)
+        bb.event.worker_fire = self._worker_handler
+        try:
+            self._throw_zero_division_exception()
+        except ZeroDivisionError as ex:
+            logger.exception(ex)
+        event = self._returned_event
+        try:
+            pe = pickle.dumps(event)
+            pickle.loads(pe)
+        except Exception:
+            self.fail('Logged event is not serializable')
+        self.assertEqual(event.taskpid, EventClassesTest._worker_pid)
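+    # The pickle round trip above matters because events carrying log records
+    # are shipped to UI handlers across process boundaries (compare the
+    # sendpickle path exercised in EventHandlingTest), so an unpicklable
+    # payload would break that path.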
+
+    def test_MetadataEvent(self):
+        """ Test MetadataEvent class """
+        eventtype = "footype"
+        eventdata = {"foo": "bar"}
+        event = bb.event.MetadataEvent(eventtype, eventdata)
+        self.assertEqual(event.type, eventtype)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ProcessStarted(self):
+        """ Test ProcessStarted class """
+        processname = "foo"
+        total = 9783128974
+        event = bb.event.ProcessStarted(processname, total)
+        self.assertEqual(event.processname, processname)
+        self.assertEqual(event.total, total)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ProcessProgress(self):
+        """ Test ProcessProgress class """
+        processname = "foo"
+        progress = 243224
+        event = bb.event.ProcessProgress(processname, progress)
+        self.assertEqual(event.processname, processname)
+        self.assertEqual(event.progress, progress)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_ProcessFinished(self):
+        """ Test ProcessFinished class """
+        processname = "foo"
+        event = bb.event.ProcessFinished(processname)
+        self.assertEqual(event.processname, processname)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_SanityCheck(self):
+        """ Test SanityCheck class """
+        event1 = bb.event.SanityCheck()
+        self.assertEqual(event1.generateevents, True)
+        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
+        generateevents = False
+        event2 = bb.event.SanityCheck(generateevents)
+        self.assertEqual(event2.generateevents, generateevents)
+        self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+
+    def test_SanityCheckPassed(self):
+        """ Test SanityCheckPassed class """
+        event = bb.event.SanityCheckPassed()
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)
+
+    def test_SanityCheckFailed(self):
+        """ Test SanityCheckFailed class """
+        msg = "The sanity test failed."
+        event1 = bb.event.SanityCheckFailed(msg)
+        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
+        network_error = True
+        event2 = bb.event.SanityCheckFailed(msg, network_error)
+        self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+
+    def test_network_event_classes(self):
+        """ Test network event classes """
+        event1 = bb.event.NetworkTest()
+        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
+        generateevents = False
+        event2 = bb.event.NetworkTest(generateevents)
+        self.assertEqual(event2.pid, EventClassesTest._worker_pid)
+        event3 = bb.event.NetworkTestPassed()
+        self.assertEqual(event3.pid, EventClassesTest._worker_pid)
+        event4 = bb.event.NetworkTestFailed()
+        self.assertEqual(event4.pid, EventClassesTest._worker_pid)
+
+    def test_FindSigInfoResult(self):
+        """ Test FindSigInfoResult event class """
+        result = [Mock()]
+        event = bb.event.FindSigInfoResult(result)
+        self.assertEqual(event.result, result)
+        self.assertEqual(event.pid, EventClassesTest._worker_pid)

+ 793 - 175
bitbake/lib/bb/tests/fetch.py

@@ -28,6 +28,11 @@ from bb.fetch2 import URI
 from bb.fetch2 import FetchMethod
 import bb
 
+def skipIfNoNetwork():
+    if os.environ.get("BB_SKIP_NETTESTS") == "yes":
+        return unittest.skip("Network tests being skipped")
+    return lambda f: f
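+# skipIfNoNetwork is a decorator factory: when BB_SKIP_NETTESTS=yes it returns
+# unittest.skip(...), which marks the decorated test as skipped, and otherwise
+# it returns an identity function that leaves the test untouched. Usage:
+#
+#     @skipIfNoNetwork()
+#     def test_something_remote(self):
+#         ...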
+
 class URITest(unittest.TestCase):
     test_uris = {
         "http://www.google.com/index.html" : {
@@ -518,141 +523,153 @@ class FetcherLocalTest(FetcherTest):
             self.fetchUnpack(['file://a;subdir=/bin/sh'])
 
 class FetcherNetworkTest(FetcherTest):
+    @skipIfNoNetwork()
+    def test_fetch(self):
+        fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
+        fetcher.download()
+        self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+        self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.1.tar.gz"), 57892)
+        self.d.setVar("BB_NO_NETWORK", "1")
+        fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
+        fetcher.download()
+        fetcher.unpack(self.unpackdir)
+        self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.0/")), 9)
+        self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.1/")), 9)
 
-    if os.environ.get("BB_SKIP_NETTESTS") == "yes":
-        print("Unset BB_SKIP_NETTESTS to run network tests")
-    else:
-        def test_fetch(self):
-            fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
-            fetcher.download()
-            self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
-            self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.1.tar.gz"), 57892)
-            self.d.setVar("BB_NO_NETWORK", "1")
-            fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
-            fetcher.download()
-            fetcher.unpack(self.unpackdir)
-            self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.0/")), 9)
-            self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.1/")), 9)
-
-        def test_fetch_mirror(self):
-            self.d.setVar("MIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
-            fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
-            fetcher.download()
-            self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
-
-        def test_fetch_mirror_of_mirror(self):
-            self.d.setVar("MIRRORS", "http://.*/.* http://invalid2.yoctoproject.org/ \n http://invalid2.yoctoproject.org/.* http://downloads.yoctoproject.org/releases/bitbake")
-            fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
-            fetcher.download()
-            self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
-
-        def test_fetch_file_mirror_of_mirror(self):
-            self.d.setVar("MIRRORS", "http://.*/.* file:///some1where/ \n file:///some1where/.* file://some2where/ \n file://some2where/.* http://downloads.yoctoproject.org/releases/bitbake")
-            fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
-            os.mkdir(self.dldir + "/some2where")
-            fetcher.download()
-            self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
-
-        def test_fetch_premirror(self):
-            self.d.setVar("PREMIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
-            fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
-            fetcher.download()
-            self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
-
-        def gitfetcher(self, url1, url2):
-            def checkrevision(self, fetcher):
-                fetcher.unpack(self.unpackdir)
-                revision = bb.process.run("git rev-parse HEAD", shell=True, cwd=self.unpackdir + "/git")[0].strip()
-                self.assertEqual(revision, "270a05b0b4ba0959fe0624d2a4885d7b70426da5")
-
-            self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1")
-            self.d.setVar("SRCREV", "270a05b0b4ba0959fe0624d2a4885d7b70426da5")
-            fetcher = bb.fetch.Fetch([url1], self.d)
-            fetcher.download()
-            checkrevision(self, fetcher)
-            # Wipe out the dldir clone and the unpacked source, turn off the network and check mirror tarball works
-            bb.utils.prunedir(self.dldir + "/git2/")
-            bb.utils.prunedir(self.unpackdir)
-            self.d.setVar("BB_NO_NETWORK", "1")
-            fetcher = bb.fetch.Fetch([url2], self.d)
-            fetcher.download()
-            checkrevision(self, fetcher)
-
-        def test_gitfetch(self):
-            url1 = url2 = "git://git.openembedded.org/bitbake"
-            self.gitfetcher(url1, url2)
-
-        def test_gitfetch_goodsrcrev(self):
-            # SRCREV is set but matches rev= parameter
-            url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5"
-            self.gitfetcher(url1, url2)
-
-        def test_gitfetch_badsrcrev(self):
-            # SRCREV is set but does not match rev= parameter
-            url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5"
-            self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
-
-        def test_gitfetch_tagandrev(self):
-            # SRCREV is set but does not match rev= parameter
-            url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5"
-            self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
-
-        def test_gitfetch_localusehead(self):
-            # Create dummy local Git repo
-            src_dir = tempfile.mkdtemp(dir=self.tempdir,
-                                       prefix='gitfetch_localusehead_')
-            src_dir = os.path.abspath(src_dir)
-            bb.process.run("git init", cwd=src_dir)
-            bb.process.run("git commit --allow-empty -m'Dummy commit'",
-                           cwd=src_dir)
-            # Use other branch than master
-            bb.process.run("git checkout -b my-devel", cwd=src_dir)
-            bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
-                           cwd=src_dir)
-            stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
-            orig_rev = stdout[0].strip()
-
-            # Fetch and check revision
-            self.d.setVar("SRCREV", "AUTOINC")
-            url = "git://" + src_dir + ";protocol=file;usehead=1"
-            fetcher = bb.fetch.Fetch([url], self.d)
-            fetcher.download()
-            fetcher.unpack(self.unpackdir)
-            stdout = bb.process.run("git rev-parse HEAD",
-                                    cwd=os.path.join(self.unpackdir, 'git'))
-            unpack_rev = stdout[0].strip()
-            self.assertEqual(orig_rev, unpack_rev)
-
-        def test_gitfetch_remoteusehead(self):
-            url = "git://git.openembedded.org/bitbake;usehead=1"
-            self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
-
-        def test_gitfetch_premirror(self):
-            url1 = "git://git.openembedded.org/bitbake"
-            url2 = "git://someserver.org/bitbake"
-            self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
-            self.gitfetcher(url1, url2)
-
-        def test_gitfetch_premirror2(self):
-            url1 = url2 = "git://someserver.org/bitbake"
-            self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
-            self.gitfetcher(url1, url2)
-
-        def test_gitfetch_premirror3(self):
-            realurl = "git://git.openembedded.org/bitbake"
-            dummyurl = "git://someserver.org/bitbake"
-            self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git")
-            os.chdir(self.tempdir)
-            bb.process.run("git clone %s %s 2> /dev/null" % (realurl, self.sourcedir), shell=True)
-            self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file \n" % (dummyurl, self.sourcedir))
-            self.gitfetcher(dummyurl, dummyurl)
-
-        def test_git_submodule(self):
-            fetcher = bb.fetch.Fetch(["gitsm://git.yoctoproject.org/git-submodule-test;rev=f12e57f2edf0aa534cf1616fa983d165a92b0842"], self.d)
-            fetcher.download()
-            # Previous cwd has been deleted
-            os.chdir(os.path.dirname(self.unpackdir))
+    @skipIfNoNetwork()
+    def test_fetch_mirror(self):
+        self.d.setVar("MIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
+        fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
+        fetcher.download()
+        self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+
+    @skipIfNoNetwork()
+    def test_fetch_mirror_of_mirror(self):
+        self.d.setVar("MIRRORS", "http://.*/.* http://invalid2.yoctoproject.org/ \n http://invalid2.yoctoproject.org/.* http://downloads.yoctoproject.org/releases/bitbake")
+        fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
+        fetcher.download()
+        self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+
+    @skipIfNoNetwork()
+    def test_fetch_file_mirror_of_mirror(self):
+        self.d.setVar("MIRRORS", "http://.*/.* file:///some1where/ \n file:///some1where/.* file://some2where/ \n file://some2where/.* http://downloads.yoctoproject.org/releases/bitbake")
+        fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
+        os.mkdir(self.dldir + "/some2where")
+        fetcher.download()
+        self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+
+    @skipIfNoNetwork()
+    def test_fetch_premirror(self):
+        self.d.setVar("PREMIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
+        fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
+        fetcher.download()
+        self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
+
+    @skipIfNoNetwork()
+    def gitfetcher(self, url1, url2):
+        def checkrevision(self, fetcher):
             fetcher.unpack(self.unpackdir)
+            revision = bb.process.run("git rev-parse HEAD", shell=True, cwd=self.unpackdir + "/git")[0].strip()
+            self.assertEqual(revision, "270a05b0b4ba0959fe0624d2a4885d7b70426da5")
+
+        self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1")
+        self.d.setVar("SRCREV", "270a05b0b4ba0959fe0624d2a4885d7b70426da5")
+        fetcher = bb.fetch.Fetch([url1], self.d)
+        fetcher.download()
+        checkrevision(self, fetcher)
+        # Wipe out the dldir clone and the unpacked source, turn off the network and check mirror tarball works
+        bb.utils.prunedir(self.dldir + "/git2/")
+        bb.utils.prunedir(self.unpackdir)
+        self.d.setVar("BB_NO_NETWORK", "1")
+        fetcher = bb.fetch.Fetch([url2], self.d)
+        fetcher.download()
+        checkrevision(self, fetcher)
+
+    @skipIfNoNetwork()
+    def test_gitfetch(self):
+        url1 = url2 = "git://git.openembedded.org/bitbake"
+        self.gitfetcher(url1, url2)
+
+    @skipIfNoNetwork()
+    def test_gitfetch_goodsrcrev(self):
+        # SRCREV is set but matches rev= parameter
+        url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5"
+        self.gitfetcher(url1, url2)
+
+    @skipIfNoNetwork()
+    def test_gitfetch_badsrcrev(self):
+        # SRCREV is set but does not match rev= parameter
+        url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5"
+        self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
+
+    @skipIfNoNetwork()
+    def test_gitfetch_tagandrev(self):
+        # SRCREV is set but does not match rev= parameter
+        url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5"
+        self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
+
+    @skipIfNoNetwork()
+    def test_gitfetch_localusehead(self):
+        # Create dummy local Git repo
+        src_dir = tempfile.mkdtemp(dir=self.tempdir,
+                                   prefix='gitfetch_localusehead_')
+        src_dir = os.path.abspath(src_dir)
+        bb.process.run("git init", cwd=src_dir)
+        bb.process.run("git commit --allow-empty -m'Dummy commit'",
+                       cwd=src_dir)
+        # Use other branch than master
+        bb.process.run("git checkout -b my-devel", cwd=src_dir)
+        bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
+                       cwd=src_dir)
+        stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
+        orig_rev = stdout[0].strip()
+
+        # Fetch and check revision
+        self.d.setVar("SRCREV", "AUTOINC")
+        url = "git://" + src_dir + ";protocol=file;usehead=1"
+        fetcher = bb.fetch.Fetch([url], self.d)
+        fetcher.download()
+        fetcher.unpack(self.unpackdir)
+        stdout = bb.process.run("git rev-parse HEAD",
+                                cwd=os.path.join(self.unpackdir, 'git'))
+        unpack_rev = stdout[0].strip()
+        self.assertEqual(orig_rev, unpack_rev)
+
+    @skipIfNoNetwork()
+    def test_gitfetch_remoteusehead(self):
+        url = "git://git.openembedded.org/bitbake;usehead=1"
+        self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
+
+    @skipIfNoNetwork()
+    def test_gitfetch_premirror(self):
+        url1 = "git://git.openembedded.org/bitbake"
+        url2 = "git://someserver.org/bitbake"
+        self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
+        self.gitfetcher(url1, url2)
+
+    @skipIfNoNetwork()
+    def test_gitfetch_premirror2(self):
+        url1 = url2 = "git://someserver.org/bitbake"
+        self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
+        self.gitfetcher(url1, url2)
+
+    @skipIfNoNetwork()
+    def test_gitfetch_premirror3(self):
+        realurl = "git://git.openembedded.org/bitbake"
+        dummyurl = "git://someserver.org/bitbake"
+        self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git")
+        os.chdir(self.tempdir)
+        bb.process.run("git clone %s %s 2> /dev/null" % (realurl, self.sourcedir), shell=True)
+        self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file \n" % (dummyurl, self.sourcedir))
+        self.gitfetcher(dummyurl, dummyurl)
+
+    @skipIfNoNetwork()
+    def test_git_submodule(self):
+        fetcher = bb.fetch.Fetch(["gitsm://git.yoctoproject.org/git-submodule-test;rev=f12e57f2edf0aa534cf1616fa983d165a92b0842"], self.d)
+        fetcher.download()
+        # Previous cwd has been deleted
+        os.chdir(os.path.dirname(self.unpackdir))
+        fetcher.unpack(self.unpackdir)
 
 
 class TrustedNetworksTest(FetcherTest):
@@ -782,30 +799,32 @@ class FetchLatestVersionTest(FetcherTest):
         ("db", "http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz", "http://www.oracle.com/technetwork/products/berkeleydb/downloads/index-082944.html", "http://download.oracle.com/otn/berkeley-db/(?P<name>db-)(?P<pver>((\d+[\.\-_]*)+))\.tar\.gz")
             : "6.1.19",
     }
-    if os.environ.get("BB_SKIP_NETTESTS") == "yes":
-        print("Unset BB_SKIP_NETTESTS to run network tests")
-    else:
-        def test_git_latest_versionstring(self):
-            for k, v in self.test_git_uris.items():
-                self.d.setVar("PN", k[0])
-                self.d.setVar("SRCREV", k[2])
-                self.d.setVar("UPSTREAM_CHECK_GITTAGREGEX", k[3])
-                ud = bb.fetch2.FetchData(k[1], self.d)
-                pupver= ud.method.latest_versionstring(ud, self.d)
-                verstring = pupver[0]
-                r = bb.utils.vercmp_string(v, verstring)
-                self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
-
-        def test_wget_latest_versionstring(self):
-            for k, v in self.test_wget_uris.items():
-                self.d.setVar("PN", k[0])
-                self.d.setVar("UPSTREAM_CHECK_URI", k[2])
-                self.d.setVar("UPSTREAM_CHECK_REGEX", k[3])
-                ud = bb.fetch2.FetchData(k[1], self.d)
-                pupver = ud.method.latest_versionstring(ud, self.d)
-                verstring = pupver[0]
-                r = bb.utils.vercmp_string(v, verstring)
-                self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
+
+    @skipIfNoNetwork()
+    def test_git_latest_versionstring(self):
+        for k, v in self.test_git_uris.items():
+            self.d.setVar("PN", k[0])
+            self.d.setVar("SRCREV", k[2])
+            self.d.setVar("UPSTREAM_CHECK_GITTAGREGEX", k[3])
+            ud = bb.fetch2.FetchData(k[1], self.d)
+            pupver = ud.method.latest_versionstring(ud, self.d)
+            verstring = pupver[0]
+            self.assertTrue(verstring, msg="Could not find upstream version")
+            r = bb.utils.vercmp_string(v, verstring)
+            self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
+
+    @skipIfNoNetwork()
+    def test_wget_latest_versionstring(self):
+        for k, v in self.test_wget_uris.items():
+            self.d.setVar("PN", k[0])
+            self.d.setVar("UPSTREAM_CHECK_URI", k[2])
+            self.d.setVar("UPSTREAM_CHECK_REGEX", k[3])
+            ud = bb.fetch2.FetchData(k[1], self.d)
+            pupver = ud.method.latest_versionstring(ud, self.d)
+            verstring = pupver[0]
+            self.assertTrue(verstring, msg="Could not find upstream version")
+            r = bb.utils.vercmp_string(v, verstring)
+            self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
 
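The skipIfNoNetwork() decorator used in these hunks replaces the old module-level BB_SKIP_NETTESTS guard shown in the removed lines. A minimal sketch of such a decorator factory, assuming it keeps the same environment check (the actual definition lives elsewhere in fetch.py):

    import os
    import unittest

    def skipIfNoNetwork():
        # Skip network-dependent tests when BB_SKIP_NETTESTS=yes, preserving
        # the behaviour of the removed module-level guard.
        if os.environ.get("BB_SKIP_NETTESTS") == "yes":
            return unittest.skip("Network tests skipped (BB_SKIP_NETTESTS=yes)")
        return lambda f: f

Applied as @skipIfNoNetwork() above a test method, this either returns unittest's skip decorator or a no-op identity decorator.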
 
 class FetchCheckStatusTest(FetcherTest):
@@ -818,37 +837,636 @@ class FetchCheckStatusTest(FetcherTest):
                       "https://yoctoproject.org/documentation",
                       "http://downloads.yoctoproject.org/releases/opkg/opkg-0.1.7.tar.gz",
                       "http://downloads.yoctoproject.org/releases/opkg/opkg-0.3.0.tar.gz",
-                      "ftp://ftp.gnu.org/gnu/autoconf/autoconf-2.60.tar.gz",
-                      "ftp://ftp.gnu.org/gnu/chess/gnuchess-5.08.tar.gz",
-                      "ftp://ftp.gnu.org/gnu/gmp/gmp-4.0.tar.gz",
+                      "ftp://sourceware.org/pub/libffi/libffi-1.20.tar.gz",
+                      "http://ftp.gnu.org/gnu/autoconf/autoconf-2.60.tar.gz",
+                      "https://ftp.gnu.org/gnu/chess/gnuchess-5.08.tar.gz",
+                      "https://ftp.gnu.org/gnu/gmp/gmp-4.0.tar.gz",
                       # GitHub releases are hosted on Amazon S3, which doesn't support HEAD
                       "https://github.com/kergoth/tslib/releases/download/1.1/tslib-1.1.tar.xz"
                       ]
 
-    if os.environ.get("BB_SKIP_NETTESTS") == "yes":
-        print("Unset BB_SKIP_NETTESTS to run network tests")
-    else:
-
-        def test_wget_checkstatus(self):
-            fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d)
-            for u in self.test_wget_uris:
+    @skipIfNoNetwork()
+    def test_wget_checkstatus(self):
+        fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d)
+        for u in self.test_wget_uris:
+            with self.subTest(url=u):
                 ud = fetch.ud[u]
                 m = ud.method
                 ret = m.checkstatus(fetch, ud, self.d)
                 self.assertTrue(ret, msg="URI %s, can't check status" % (u))
 
+    @skipIfNoNetwork()
+    def test_wget_checkstatus_connection_cache(self):
+        from bb.fetch2 import FetchConnectionCache
 
-        def test_wget_checkstatus_connection_cache(self):
-            from bb.fetch2 import FetchConnectionCache
-
-            connection_cache = FetchConnectionCache()
-            fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d,
-                        connection_cache = connection_cache)
+        connection_cache = FetchConnectionCache()
+        fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d,
+                    connection_cache = connection_cache)
 
-            for u in self.test_wget_uris:
+        for u in self.test_wget_uris:
+            with self.subTest(url=u):
                 ud = fetch.ud[u]
                 m = ud.method
                 ret = m.checkstatus(fetch, ud, self.d)
                 self.assertTrue(ret, msg="URI %s, can't check status" % (u))
 
-            connection_cache.close_connections()
+        connection_cache.close_connections()
+
+
+class GitMakeShallowTest(FetcherTest):
+    bitbake_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..')
+    make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow')
+
+    def setUp(self):
+        FetcherTest.setUp(self)
+        self.gitdir = os.path.join(self.tempdir, 'gitshallow')
+        bb.utils.mkdirhier(self.gitdir)
+        bb.process.run('git init', cwd=self.gitdir)
+
+    def assertRefs(self, expected_refs):
+        actual_refs = self.git(['for-each-ref', '--format=%(refname)']).splitlines()
+        full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs).splitlines()
+        self.assertEqual(sorted(full_expected), sorted(actual_refs))
+
+    def assertRevCount(self, expected_count, args=None):
+        if args is None:
+            args = ['HEAD']
+        revs = self.git(['rev-list'] + args)
+        actual_count = len(revs.splitlines())
+        self.assertEqual(expected_count, actual_count, msg='Revision count `%d` is not the expected `%d`' % (actual_count, expected_count))
+
+    def git(self, cmd):
+        if isinstance(cmd, str):
+            cmd = 'git ' + cmd
+        else:
+            cmd = ['git'] + cmd
+        return bb.process.run(cmd, cwd=self.gitdir)[0]
+
+    def make_shallow(self, args=None):
+        if args is None:
+            args = ['HEAD']
+        return bb.process.run([self.make_shallow_path] + args, cwd=self.gitdir)
+
+    def add_empty_file(self, path, msg=None):
+        if msg is None:
+            msg = path
+        open(os.path.join(self.gitdir, path), 'w').close()
+        self.git(['add', path])
+        self.git(['commit', '-m', msg, path])
+
+    def test_make_shallow_single_branch_no_merge(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.assertRevCount(2)
+        self.make_shallow()
+        self.assertRevCount(1)
+
+    def test_make_shallow_single_branch_one_merge(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('checkout -b a_branch')
+        self.add_empty_file('c')
+        self.git('checkout master')
+        self.add_empty_file('d')
+        self.git('merge --no-ff --no-edit a_branch')
+        self.git('branch -d a_branch')
+        self.add_empty_file('e')
+        self.assertRevCount(6)
+        self.make_shallow(['HEAD~2'])
+        self.assertRevCount(5)
+
+    def test_make_shallow_at_merge(self):
+        self.add_empty_file('a')
+        self.git('checkout -b a_branch')
+        self.add_empty_file('b')
+        self.git('checkout master')
+        self.git('merge --no-ff --no-edit a_branch')
+        self.git('branch -d a_branch')
+        self.assertRevCount(3)
+        self.make_shallow()
+        self.assertRevCount(1)
+
+    def test_make_shallow_annotated_tag(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('tag -a -m a_tag a_tag')
+        self.assertRevCount(2)
+        self.make_shallow(['a_tag'])
+        self.assertRevCount(1)
+
+    def test_make_shallow_multi_ref(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('checkout -b a_branch')
+        self.add_empty_file('c')
+        self.git('checkout master')
+        self.add_empty_file('d')
+        self.git('checkout -b a_branch_2')
+        self.add_empty_file('a_tag')
+        self.git('tag a_tag')
+        self.git('checkout master')
+        self.git('branch -D a_branch_2')
+        self.add_empty_file('e')
+        self.assertRevCount(6, ['--all'])
+        self.make_shallow()
+        self.assertRevCount(5, ['--all'])
+
+    def test_make_shallow_multi_ref_trim(self):
+        self.add_empty_file('a')
+        self.git('checkout -b a_branch')
+        self.add_empty_file('c')
+        self.git('checkout master')
+        self.assertRevCount(1)
+        self.assertRevCount(2, ['--all'])
+        self.assertRefs(['master', 'a_branch'])
+        self.make_shallow(['-r', 'master', 'HEAD'])
+        self.assertRevCount(1, ['--all'])
+        self.assertRefs(['master'])
+
+    def test_make_shallow_noop(self):
+        self.add_empty_file('a')
+        self.assertRevCount(1)
+        self.make_shallow()
+        self.assertRevCount(1)
+
+    @skipIfNoNetwork()
+    def test_make_shallow_bitbake(self):
+        self.git('remote add origin https://github.com/openembedded/bitbake')
+        self.git('fetch --tags origin')
+        orig_revs = len(self.git('rev-list --all').splitlines())
+        self.make_shallow(['refs/tags/1.10.0'])
+        self.assertRevCount(orig_revs - 1746, ['--all'])
+
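The make_shallow() helper above shells out to the new bin/git-make-shallow script; for illustration, a sketch of an equivalent direct invocation (the clone path is a placeholder, and the script is assumed to be on PATH):

    import bb.process

    clonedir = '/path/to/clone'  # placeholder path to an existing git clone
    # Mirrors make_shallow(['-r', 'master', 'HEAD']) in the tests above:
    # keep only the master ref and truncate history below HEAD
    bb.process.run(['git-make-shallow', '-r', 'master', 'HEAD'], cwd=clonedir)
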
+class GitShallowTest(FetcherTest):
+    def setUp(self):
+        FetcherTest.setUp(self)
+        self.gitdir = os.path.join(self.tempdir, 'git')
+        self.srcdir = os.path.join(self.tempdir, 'gitsource')
+
+        bb.utils.mkdirhier(self.srcdir)
+        self.git('init', cwd=self.srcdir)
+        self.d.setVar('WORKDIR', self.tempdir)
+        self.d.setVar('S', self.gitdir)
+        self.d.delVar('PREMIRRORS')
+        self.d.delVar('MIRRORS')
+
+        uri = 'git://%s;protocol=file;subdir=${S}' % self.srcdir
+        self.d.setVar('SRC_URI', uri)
+        self.d.setVar('SRCREV', '${AUTOREV}')
+        self.d.setVar('AUTOREV', '${@bb.fetch2.get_autorev(d)}')
+
+        self.d.setVar('BB_GIT_SHALLOW', '1')
+        self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '0')
+        self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1')
+
+    def assertRefs(self, expected_refs, cwd=None):
+        if cwd is None:
+            cwd = self.gitdir
+        actual_refs = self.git(['for-each-ref', '--format=%(refname)'], cwd=cwd).splitlines()
+        full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs, cwd=cwd).splitlines()
+        self.assertEqual(sorted(set(full_expected)), sorted(set(actual_refs)))
+
+    def assertRevCount(self, expected_count, args=None, cwd=None):
+        if args is None:
+            args = ['HEAD']
+        if cwd is None:
+            cwd = self.gitdir
+        revs = self.git(['rev-list'] + args, cwd=cwd)
+        actual_count = len(revs.splitlines())
+        self.assertEqual(expected_count, actual_count, msg='Revision count `%d` is not the expected `%d`' % (actual_count, expected_count))
+
+    def git(self, cmd, cwd=None):
+        if isinstance(cmd, str):
+            cmd = 'git ' + cmd
+        else:
+            cmd = ['git'] + cmd
+        if cwd is None:
+            cwd = self.gitdir
+        return bb.process.run(cmd, cwd=cwd)[0]
+
+    def add_empty_file(self, path, cwd=None, msg=None):
+        if msg is None:
+            msg = path
+        if cwd is None:
+            cwd = self.srcdir
+        open(os.path.join(cwd, path), 'w').close()
+        self.git(['add', path], cwd)
+        self.git(['commit', '-m', msg, path], cwd)
+
+    def fetch(self, uri=None):
+        if uri is None:
+            uris = self.d.getVar('SRC_URI', True).split()
+            uri = uris[0]
+            d = self.d
+        else:
+            d = self.d.createCopy()
+            d.setVar('SRC_URI', uri)
+            uri = d.expand(uri)
+            uris = [uri]
+
+        fetcher = bb.fetch2.Fetch(uris, d)
+        fetcher.download()
+        ud = fetcher.ud[uri]
+        return fetcher, ud
+
+    def fetch_and_unpack(self, uri=None):
+        fetcher, ud = self.fetch(uri)
+        fetcher.unpack(self.d.getVar('WORKDIR'))
+        assert os.path.exists(self.d.getVar('S'))
+        return fetcher, ud
+
+    def fetch_shallow(self, uri=None, disabled=False, keepclone=False):
+        """Fetch a uri, generating a shallow tarball, then unpack using it"""
+        fetcher, ud = self.fetch_and_unpack(uri)
+        assert os.path.exists(ud.clonedir), 'Git clone in DLDIR (%s) does not exist for uri %s' % (ud.clonedir, uri)
+
+        # Confirm that the shallow mirror tarball was created, unless disabled
+        if not disabled:
+            assert os.path.exists(os.path.join(self.dldir, ud.mirrortarballs[0]))
+
+        # Remove the unpacked tree and the full git clone, leaving only the
+        # shallow mirror tarball for the next fetch
+        bb.utils.remove(self.gitdir, recurse=True)
+        bb.utils.remove(ud.clonedir, recurse=True)
+
+        # Fetch and unpack again; with no git clone or full mirror tarball
+        # available, the shallow tarball must be used
+        fetcher, ud = self.fetch_and_unpack(uri)
+        if not disabled:
+            assert os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')), 'Unpacked git repository at %s is not shallow' % self.gitdir
+        else:
+            assert not os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')), 'Unpacked git repository at %s is shallow' % self.gitdir
+        return fetcher, ud
+
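For reference, the shallow-fetch variables this class exercises, collected into one sketch (values are illustrative; each variable appears in a setVar call in setUp() or in the tests below):

    import bb.data

    d = bb.data.init()
    d.setVar('BB_GIT_SHALLOW', '1')                  # enable shallow handling
    d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1')    # write shallow mirror tarballs
    d.setVar('BB_GIT_SHALLOW_DEPTH', '1')            # default depth; '0' disables depth limiting
    d.setVar('BB_GIT_SHALLOW_DEPTH_default', '2')    # override for the URI named 'default'
    d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')          # remove history below these revs
    d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*')  # extra refs to keep
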
+    def test_shallow_disabled(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.d.setVar('BB_GIT_SHALLOW', '0')
+        self.fetch_shallow(disabled=True)
+        self.assertRevCount(2)
+
+    def test_shallow_nobranch(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        srcrev = self.git('rev-parse HEAD', cwd=self.srcdir).strip()
+        self.d.setVar('SRCREV', srcrev)
+        uri = self.d.getVar('SRC_URI', True).split()[0]
+        uri = '%s;nobranch=1;bare=1' % uri
+
+        self.fetch_shallow(uri)
+        self.assertRevCount(1)
+
+        # shallow refs are used to ensure the srcrev sticks around when we
+        # have no other branches referencing it
+        self.assertRefs(['refs/shallow/default'])
+
+    def test_shallow_default_depth_1(self):
+        # Create initial git repo
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.fetch_shallow()
+        self.assertRevCount(1)
+
+    def test_shallow_depth_0_disables(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+        self.fetch_shallow(disabled=True)
+        self.assertRevCount(2)
+
+    def test_shallow_depth_default_override(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '2')
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '1')
+        self.fetch_shallow()
+        self.assertRevCount(1)
+
+    def test_shallow_depth_default_override_disable(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.add_empty_file('c')
+        self.assertRevCount(3, cwd=self.srcdir)
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '2')
+        self.fetch_shallow()
+        self.assertRevCount(2)
+
+    def test_current_shallow_out_of_date_clone(self):
+        # Create initial git repo
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.add_empty_file('c')
+        self.assertRevCount(3, cwd=self.srcdir)
+
+        # Clone and generate mirror tarball
+        fetcher, ud = self.fetch()
+
+        # Ensure we have a current mirror tarball, but an out of date clone
+        self.git('update-ref refs/heads/master refs/heads/master~1', cwd=ud.clonedir)
+        self.assertRevCount(2, cwd=ud.clonedir)
+
+        # Fetch and unpack, from the current tarball, not the out of date clone
+        bb.utils.remove(self.gitdir, recurse=True)
+        fetcher, ud = self.fetch()
+        fetcher.unpack(self.d.getVar('WORKDIR'))
+        self.assertRevCount(1)
+
+    def test_shallow_single_branch_no_merge(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.fetch_shallow()
+        self.assertRevCount(1)
+        assert os.path.exists(os.path.join(self.gitdir, 'a'))
+        assert os.path.exists(os.path.join(self.gitdir, 'b'))
+
+    def test_shallow_no_dangling(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.fetch_shallow()
+        self.assertRevCount(1)
+        assert not self.git('fsck --dangling')
+
+    def test_shallow_srcrev_branch_truncation(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        b_commit = self.git('rev-parse HEAD', cwd=self.srcdir).rstrip()
+        self.add_empty_file('c')
+        self.assertRevCount(3, cwd=self.srcdir)
+
+        self.d.setVar('SRCREV', b_commit)
+        self.fetch_shallow()
+
+        # The 'c' commit was removed entirely, and 'a' was removed from history
+        self.assertRevCount(1, ['--all'])
+        self.assertEqual(self.git('rev-parse HEAD').strip(), b_commit)
+        assert os.path.exists(os.path.join(self.gitdir, 'a'))
+        assert os.path.exists(os.path.join(self.gitdir, 'b'))
+        assert not os.path.exists(os.path.join(self.gitdir, 'c'))
+
+    def test_shallow_ref_pruning(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('branch a_branch', cwd=self.srcdir)
+        self.assertRefs(['master', 'a_branch'], cwd=self.srcdir)
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.fetch_shallow()
+
+        self.assertRefs(['master', 'origin/master'])
+        self.assertRevCount(1)
+
+    def test_shallow_submodules(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+
+        smdir = os.path.join(self.tempdir, 'gitsubmodule')
+        bb.utils.mkdirhier(smdir)
+        self.git('init', cwd=smdir)
+        self.add_empty_file('asub', cwd=smdir)
+
+        self.git('submodule init', cwd=self.srcdir)
+        self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
+        self.git('submodule update', cwd=self.srcdir)
+        self.git('commit -m submodule -a', cwd=self.srcdir)
+
+        uri = 'gitsm://%s;protocol=file;subdir=${S}' % self.srcdir
+        fetcher, ud = self.fetch_shallow(uri)
+
+        self.assertRevCount(1)
+        assert './.git/modules/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, ud.mirrortarballs[0]))[0]
+        assert os.listdir(os.path.join(self.gitdir, 'gitsubmodule'))
+
+    if any(os.path.exists(os.path.join(p, 'git-annex')) for p in os.environ.get('PATH').split(':')):
+        def test_shallow_annex(self):
+            self.add_empty_file('a')
+            self.add_empty_file('b')
+            self.git('annex init', cwd=self.srcdir)
+            open(os.path.join(self.srcdir, 'c'), 'w').close()
+            self.git('annex add c', cwd=self.srcdir)
+            self.git('commit -m annex-c -a', cwd=self.srcdir)
+            bb.process.run('chmod u+w -R %s' % os.path.join(self.srcdir, '.git', 'annex'))
+
+            uri = 'gitannex://%s;protocol=file;subdir=${S}' % self.srcdir
+            fetcher, ud = self.fetch_shallow(uri)
+
+            self.assertRevCount(1)
+            assert './.git/annex/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, ud.mirrortarballs[0]))[0]
+            assert os.path.exists(os.path.join(self.gitdir, 'c'))
+
+    def test_shallow_multi_one_uri(self):
+        # Create initial git repo
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('checkout -b a_branch', cwd=self.srcdir)
+        self.add_empty_file('c')
+        self.add_empty_file('d')
+        self.git('checkout master', cwd=self.srcdir)
+        self.git('tag v0.0 a_branch', cwd=self.srcdir)
+        self.add_empty_file('e')
+        self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir)
+        self.add_empty_file('f')
+        self.assertRevCount(7, cwd=self.srcdir)
+
+        uri = self.d.getVar('SRC_URI', True).split()[0]
+        uri = '%s;branch=master,a_branch;name=master,a_branch' % uri
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+        self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')
+        self.d.setVar('SRCREV_master', '${AUTOREV}')
+        self.d.setVar('SRCREV_a_branch', '${AUTOREV}')
+
+        self.fetch_shallow(uri)
+
+        self.assertRevCount(5)
+        self.assertRefs(['master', 'origin/master', 'origin/a_branch'])
+
+    def test_shallow_multi_one_uri_depths(self):
+        # Create initial git repo
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('checkout -b a_branch', cwd=self.srcdir)
+        self.add_empty_file('c')
+        self.add_empty_file('d')
+        self.git('checkout master', cwd=self.srcdir)
+        self.add_empty_file('e')
+        self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir)
+        self.add_empty_file('f')
+        self.assertRevCount(7, cwd=self.srcdir)
+
+        uri = self.d.getVar('SRC_URI', True).split()[0]
+        uri = '%s;branch=master,a_branch;name=master,a_branch' % uri
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH_master', '3')
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH_a_branch', '1')
+        self.d.setVar('SRCREV_master', '${AUTOREV}')
+        self.d.setVar('SRCREV_a_branch', '${AUTOREV}')
+
+        self.fetch_shallow(uri)
+
+        self.assertRevCount(4, ['--all'])
+        self.assertRefs(['master', 'origin/master', 'origin/a_branch'])
+
+    def test_shallow_clone_preferred_over_shallow(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+
+        # Fetch once to generate the shallow tarball
+        fetcher, ud = self.fetch()
+        assert os.path.exists(os.path.join(self.dldir, ud.mirrortarballs[0]))
+
+        # Fetch and unpack with both the clonedir and shallow tarball available
+        bb.utils.remove(self.gitdir, recurse=True)
+        fetcher, ud = self.fetch_and_unpack()
+
+        # The unpacked tree should *not* be shallow
+        self.assertRevCount(2)
+        assert not os.path.exists(os.path.join(self.gitdir, '.git', 'shallow'))
+
+    def test_shallow_mirrors(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+
+        # Fetch once to generate the shallow tarball
+        fetcher, ud = self.fetch()
+        mirrortarball = ud.mirrortarballs[0]
+        assert os.path.exists(os.path.join(self.dldir, mirrortarball))
+
+        # Set up the mirror
+        mirrordir = os.path.join(self.tempdir, 'mirror')
+        bb.utils.mkdirhier(mirrordir)
+        self.d.setVar('PREMIRRORS', 'git://.*/.* file://%s/\n' % mirrordir)
+
+        os.rename(os.path.join(self.dldir, mirrortarball),
+                  os.path.join(mirrordir, mirrortarball))
+
+        # Fetch from the mirror
+        bb.utils.remove(self.dldir, recurse=True)
+        bb.utils.remove(self.gitdir, recurse=True)
+        self.fetch_and_unpack()
+        self.assertRevCount(1)
+
+    def test_shallow_invalid_depth(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '-12')
+        with self.assertRaises(bb.fetch2.FetchError):
+            self.fetch()
+
+    def test_shallow_invalid_depth_default(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '-12')
+        with self.assertRaises(bb.fetch2.FetchError):
+            self.fetch()
+
+    def test_shallow_extra_refs(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('branch a_branch', cwd=self.srcdir)
+        self.assertRefs(['master', 'a_branch'], cwd=self.srcdir)
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/heads/a_branch')
+        self.fetch_shallow()
+
+        self.assertRefs(['master', 'origin/master', 'origin/a_branch'])
+        self.assertRevCount(1)
+
+    def test_shallow_extra_refs_wildcard(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('branch a_branch', cwd=self.srcdir)
+        self.git('tag v1.0', cwd=self.srcdir)
+        self.assertRefs(['master', 'a_branch', 'v1.0'], cwd=self.srcdir)
+        self.assertRevCount(2, cwd=self.srcdir)
+
+        self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*')
+        self.fetch_shallow()
+
+        self.assertRefs(['master', 'origin/master', 'v1.0'])
+        self.assertRevCount(1)
+
+    def test_shallow_missing_extra_refs(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+
+        self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/heads/foo')
+        with self.assertRaises(bb.fetch2.FetchError):
+            self.fetch()
+
+    def test_shallow_missing_extra_refs_wildcard(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+
+        self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*')
+        self.fetch()
+
+    def test_shallow_remove_revs(self):
+        # Create initial git repo
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+        self.git('checkout -b a_branch', cwd=self.srcdir)
+        self.add_empty_file('c')
+        self.add_empty_file('d')
+        self.git('checkout master', cwd=self.srcdir)
+        self.git('tag v0.0 a_branch', cwd=self.srcdir)
+        self.add_empty_file('e')
+        self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir)
+        self.git('branch -d a_branch', cwd=self.srcdir)
+        self.add_empty_file('f')
+        self.assertRevCount(7, cwd=self.srcdir)
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+        self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')
+
+        self.fetch_shallow()
+
+        self.assertRevCount(5)
+
+    def test_shallow_invalid_revs(self):
+        self.add_empty_file('a')
+        self.add_empty_file('b')
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+        self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')
+
+        with self.assertRaises(bb.fetch2.FetchError):
+            self.fetch()
+
+    @skipIfNoNetwork()
+    def test_bitbake(self):
+        self.git('remote add --mirror=fetch origin git://github.com/openembedded/bitbake', cwd=self.srcdir)
+        self.git('config core.bare true', cwd=self.srcdir)
+        self.git('fetch', cwd=self.srcdir)
+
+        self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
+        # Note that the 1.10.0 tag is annotated, so this also exercises
+        # referencing an annotated rather than a lightweight tag
+        self.d.setVar('BB_GIT_SHALLOW_REVS', '1.10.0')
+
+        self.fetch_shallow()
+
+        # Confirm that the history of 1.10.0 was removed
+        orig_revs = len(self.git('rev-list master', cwd=self.srcdir).splitlines())
+        revs = len(self.git('rev-list master').splitlines())
+        self.assertNotEqual(orig_revs, revs)
+        self.assertRefs(['master', 'origin/master'])
+        self.assertRevCount(orig_revs - 1758)

+ 36 - 15
bitbake/lib/bb/tests/parse.py

@@ -58,9 +58,9 @@ C = "3"
     def test_parse_simple(self):
         f = self.parsehelper(self.testfile)
         d = bb.parse.handle(f.name, self.d)['']
-        self.assertEqual(d.getVar("A", True), "1")
-        self.assertEqual(d.getVar("B", True), "2")
-        self.assertEqual(d.getVar("C", True), "3")
+        self.assertEqual(d.getVar("A"), "1")
+        self.assertEqual(d.getVar("B"), "2")
+        self.assertEqual(d.getVar("C"), "3")
 
     def test_parse_incomplete_function(self):
         testfileB = self.testfile.replace("}", "")
@@ -80,10 +80,31 @@ unset B[flag]
     def test_parse_unset(self):
         f = self.parsehelper(self.unsettest)
         d = bb.parse.handle(f.name, self.d)['']
-        self.assertEqual(d.getVar("A", True), None)
-        self.assertEqual(d.getVarFlag("A","flag", True), None)
-        self.assertEqual(d.getVar("B", True), "2")
-        
+        self.assertEqual(d.getVar("A"), None)
+        self.assertEqual(d.getVarFlag("A","flag"), None)
+        self.assertEqual(d.getVar("B"), "2")
+
+    exporttest = """
+A = "a"
+export B = "b"
+export C
+exportD = "d"
+"""
+
+    def test_parse_exports(self):
+        f = self.parsehelper(self.exporttest)
+        d = bb.parse.handle(f.name, self.d)['']
+        self.assertEqual(d.getVar("A"), "a")
+        self.assertIsNone(d.getVarFlag("A", "export"))
+        self.assertEqual(d.getVar("B"), "b")
+        self.assertEqual(d.getVarFlag("B", "export"), 1)
+        self.assertIsNone(d.getVar("C"))
+        self.assertEqual(d.getVarFlag("C", "export"), 1)
+        self.assertIsNone(d.getVar("D"))
+        self.assertIsNone(d.getVarFlag("D", "export"))
+        self.assertEqual(d.getVar("exportD"), "d")
+        self.assertIsNone(d.getVarFlag("exportD", "export"))
+
 
     overridetest = """
 RRECOMMENDS_${PN} = "a"
@@ -95,11 +116,11 @@ PN = "gtk+"
     def test_parse_overrides(self):
         f = self.parsehelper(self.overridetest)
         d = bb.parse.handle(f.name, self.d)['']
-        self.assertEqual(d.getVar("RRECOMMENDS", True), "b")
+        self.assertEqual(d.getVar("RRECOMMENDS"), "b")
         bb.data.expandKeys(d)
-        self.assertEqual(d.getVar("RRECOMMENDS", True), "b")
+        self.assertEqual(d.getVar("RRECOMMENDS"), "b")
         d.setVar("RRECOMMENDS_gtk+", "c")
-        self.assertEqual(d.getVar("RRECOMMENDS", True), "c")
+        self.assertEqual(d.getVar("RRECOMMENDS"), "c")
 
     overridetest2 = """
 EXTRA_OECONF = ""
@@ -112,7 +133,7 @@ EXTRA_OECONF_append = " c"
         d = bb.parse.handle(f.name, self.d)['']
         d.appendVar("EXTRA_OECONF", " d")
         d.setVar("OVERRIDES", "class-target")
-        self.assertEqual(d.getVar("EXTRA_OECONF", True), "b c d")
+        self.assertEqual(d.getVar("EXTRA_OECONF"), "b c d")
 
     overridetest3 = """
 DESCRIPTION = "A"
@@ -124,11 +145,11 @@ PN = "bc"
         f = self.parsehelper(self.overridetest3)
         d = bb.parse.handle(f.name, self.d)['']
         bb.data.expandKeys(d)
-        self.assertEqual(d.getVar("DESCRIPTION_bc-dev", True), "A B")
+        self.assertEqual(d.getVar("DESCRIPTION_bc-dev"), "A B")
         d.setVar("DESCRIPTION", "E")
         d.setVar("DESCRIPTION_bc-dev", "C D")
         d.setVar("OVERRIDES", "bc-dev")
-        self.assertEqual(d.getVar("DESCRIPTION", True), "C D")
+        self.assertEqual(d.getVar("DESCRIPTION"), "C D")
 
 
     classextend = """
@@ -159,6 +180,6 @@ python () {
         alldata = bb.parse.handle(f.name, self.d)
         d1 = alldata['']
         d2 = alldata[cls.name]
-        self.assertEqual(d1.getVar("VAR_var", True), "B")
-        self.assertEqual(d2.getVar("VAR_var", True), None)
+        self.assertEqual(d1.getVar("VAR_var"), "B")
+        self.assertEqual(d2.getVar("VAR_var"), None)
 

+ 831 - 78
bitbake/lib/bb/tinfoil.py

@@ -1,7 +1,8 @@
 # tinfoil: a simple wrapper around cooker for bitbake-based command-line utilities
 #
-# Copyright (C) 2012 Intel Corporation
+# Copyright (C) 2012-2017 Intel Corporation
 # Copyright (C) 2011 Mentor Graphics Corporation
+# Copyright (C) 2006-2012 Richard Purdie
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 as
@@ -17,47 +18,319 @@
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
 import logging
-import warnings
 import os
 import sys
+import atexit
+import re
+from collections import OrderedDict, defaultdict
 
 import bb.cache
 import bb.cooker
 import bb.providers
+import bb.taskdata
 import bb.utils
-from bb.cooker import state, BBCooker, CookerFeatures
+import bb.command
+import bb.remotedata
 from bb.cookerdata import CookerConfiguration, ConfigParameters
+from bb.main import setup_bitbake, BitBakeConfigParameters, BBMainException
 import bb.fetch2
 
+
+# We need this in order to shut down the connection to the bitbake server,
+# otherwise the process will never properly exit
+_server_connections = []
+def _terminate_connections():
+    for connection in _server_connections:
+        connection.terminate()
+atexit.register(_terminate_connections)
+
+class TinfoilUIException(Exception):
+    """Exception raised when the UI returns non-zero from its main function"""
+    def __init__(self, returncode):
+        self.returncode = returncode
+    def __repr__(self):
+        return 'UI module main returned %d' % self.returncode
+
+class TinfoilCommandFailed(Exception):
+    """Exception raised when run_command fails"""
+
+class TinfoilDataStoreConnector:
+    """Connector object used to enable access to datastore objects via tinfoil"""
+
+    def __init__(self, tinfoil, dsindex):
+        self.tinfoil = tinfoil
+        self.dsindex = dsindex
+    def getVar(self, name):
+        value = self.tinfoil.run_command('dataStoreConnectorFindVar', self.dsindex, name)
+        overrides = None
+        if isinstance(value, dict):
+            if '_connector_origtype' in value:
+                value['_content'] = self.tinfoil._reconvert_type(value['_content'], value['_connector_origtype'])
+                del value['_connector_origtype']
+            if '_connector_overrides' in value:
+                overrides = value['_connector_overrides']
+                del value['_connector_overrides']
+        return value, overrides
+    def getKeys(self):
+        return set(self.tinfoil.run_command('dataStoreConnectorGetKeys', self.dsindex))
+    def getVarHistory(self, name):
+        return self.tinfoil.run_command('dataStoreConnectorGetVarHistory', self.dsindex, name)
+    def expandPythonRef(self, varname, expr, d):
+        ds = bb.remotedata.RemoteDatastores.transmit_datastore(d)
+        ret = self.tinfoil.run_command('dataStoreConnectorExpandPythonRef', ds, varname, expr)
+        return ret
+    def setVar(self, varname, value):
+        if self.dsindex is None:
+            self.tinfoil.run_command('setVariable', varname, value)
+        else:
+            # Not currently implemented - indicate that setting should
+            # be redirected to local side
+            return True
+    def setVarFlag(self, varname, flagname, value):
+        if self.dsindex is None:
+            self.tinfoil.run_command('dataStoreConnectorSetVarFlag', self.dsindex, varname, flagname, value)
+        else:
+            # Not currently implemented - indicate that setting should
+            # be redirected to local side
+            return True
+    def delVar(self, varname):
+        if self.dsindex is None:
+            self.tinfoil.run_command('dataStoreConnectorDelVar', self.dsindex, varname)
+        else:
+            # Not currently implemented - indicate that setting should
+            # be redirected to local side
+            return True
+    def delVarFlag(self, varname, flagname):
+        if self.dsindex is None:
+            self.tinfoil.run_command('dataStoreConnectorDelVarFlag', self.dsindex, varname, flagname)
+        else:
+            # Not currently implemented - indicate that setting should
+            # be redirected to local side
+            return True
+    def renameVar(self, name, newname):
+        if self.dsindex is None:
+            self.tinfoil.run_command('dataStoreConnectorRenameVar', self.dsindex, name, newname)
+        else:
+            # Not currently implemented - indicate that setting should
+            # be redirected to local side
+            return True
+
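A hedged sketch of how this connector gets used, mirroring the wiring in Tinfoil.prepare() further down ('tinfoil' stands for an already-connected Tinfoil instance, which is an assumption here):

    import bb.data

    config_data = bb.data.init()
    # Attaching the connector as '_remote_data' makes ordinary datastore
    # operations on config_data proxy to the server via run_command()
    config_data.setVar('_remote_data', TinfoilDataStoreConnector(tinfoil, None))
    print(config_data.getVar('BBPATH'))  # served by 'dataStoreConnectorFindVar'
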
+class TinfoilCookerAdapter:
+    """
+    Provide an adapter for existing code that expects to access a cooker object via Tinfoil,
+    since now Tinfoil is on the client side it no longer has direct access.
+    """
+
+    class TinfoilCookerCollectionAdapter:
+        """ cooker.collection adapter """
+        def __init__(self, tinfoil):
+            self.tinfoil = tinfoil
+        def get_file_appends(self, fn):
+            return self.tinfoil.get_file_appends(fn)
+        def __getattr__(self, name):
+            if name == 'overlayed':
+                return self.tinfoil.get_overlayed_recipes()
+            elif name == 'bbappends':
+                return self.tinfoil.run_command('getAllAppends')
+            else:
+                raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
+
+    class TinfoilRecipeCacheAdapter:
+        """ cooker.recipecache adapter """
+        def __init__(self, tinfoil):
+            self.tinfoil = tinfoil
+            self._cache = {}
+
+        def get_pkg_pn_fn(self):
+            pkg_pn = defaultdict(list, self.tinfoil.run_command('getRecipes') or [])
+            pkg_fn = {}
+            for pn, fnlist in pkg_pn.items():
+                for fn in fnlist:
+                    pkg_fn[fn] = pn
+            self._cache['pkg_pn'] = pkg_pn
+            self._cache['pkg_fn'] = pkg_fn
+
+        def __getattr__(self, name):
+            # Grab these only when they are requested since they aren't always used
+            if name in self._cache:
+                return self._cache[name]
+            elif name == 'pkg_pn':
+                self.get_pkg_pn_fn()
+                return self._cache[name]
+            elif name == 'pkg_fn':
+                self.get_pkg_pn_fn()
+                return self._cache[name]
+            elif name == 'deps':
+                attrvalue = defaultdict(list, self.tinfoil.run_command('getRecipeDepends') or [])
+            elif name == 'rundeps':
+                attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeDepends') or [])
+            elif name == 'runrecs':
+                attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeRecommends') or [])
+            elif name == 'pkg_pepvpr':
+                attrvalue = self.tinfoil.run_command('getRecipeVersions') or {}
+            elif name == 'inherits':
+                attrvalue = self.tinfoil.run_command('getRecipeInherits') or {}
+            elif name == 'bbfile_priority':
+                attrvalue = self.tinfoil.run_command('getBbFilePriority') or {}
+            elif name == 'pkg_dp':
+                attrvalue = self.tinfoil.run_command('getDefaultPreference') or {}
+            elif name == 'fn_provides':
+                attrvalue = self.tinfoil.run_command('getRecipeProvides') or {}
+            elif name == 'packages':
+                attrvalue = self.tinfoil.run_command('getRecipePackages') or {}
+            elif name == 'packages_dynamic':
+                attrvalue = self.tinfoil.run_command('getRecipePackagesDynamic') or {}
+            elif name == 'rproviders':
+                attrvalue = self.tinfoil.run_command('getRProviders') or {}
+            else:
+                raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
+
+            self._cache[name] = attrvalue
+            return attrvalue
+
+    def __init__(self, tinfoil):
+        self.tinfoil = tinfoil
+        self.collection = self.TinfoilCookerCollectionAdapter(tinfoil)
+        self.recipecaches = {}
+        # FIXME all machines
+        self.recipecaches[''] = self.TinfoilRecipeCacheAdapter(tinfoil)
+        self._cache = {}
+    def __getattr__(self, name):
+        # Grab these only when they are requested since they aren't always used
+        if name in self._cache:
+            return self._cache[name]
+        elif name == 'skiplist':
+            attrvalue = self.tinfoil.get_skipped_recipes()
+        elif name == 'bbfile_config_priorities':
+            ret = self.tinfoil.run_command('getLayerPriorities')
+            bbfile_config_priorities = []
+            for collection, pattern, regex, pri in ret:
+                bbfile_config_priorities.append((collection, pattern, re.compile(regex), pri))
+
+            attrvalue = bbfile_config_priorities
+        else:
+            raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
+
+        self._cache[name] = attrvalue
+        return attrvalue
+
+    def findBestProvider(self, pn):
+        return self.tinfoil.find_best_provider(pn)
+
+
+class TinfoilRecipeInfo:
+    """
+    Provides a convenient representation of the cached information for a single recipe.
+    Some attributes are set on construction, others are read on-demand (which internally
+    may result in a remote procedure call to the bitbake server the first time).
+    Note that only information which is cached is available through this object - if
+    you need other variable values you will need to parse the recipe using
+    Tinfoil.parse_recipe().
+    """
+    def __init__(self, recipecache, d, pn, fn, fns):
+        self._recipecache = recipecache
+        self._d = d
+        self.pn = pn
+        self.fn = fn
+        self.fns = fns
+        self.inherit_files = recipecache.inherits[fn]
+        self.depends = recipecache.deps[fn]
+        (self.pe, self.pv, self.pr) = recipecache.pkg_pepvpr[fn]
+        self._cached_packages = None
+        self._cached_rprovides = None
+        self._cached_packages_dynamic = None
+
+    def __getattr__(self, name):
+        if name == 'alternates':
+            return [x for x in self.fns if x != self.fn]
+        elif name == 'rdepends':
+            return self._recipecache.rundeps[self.fn]
+        elif name == 'rrecommends':
+            return self._recipecache.runrecs[self.fn]
+        elif name == 'provides':
+            return self._recipecache.fn_provides[self.fn]
+        elif name == 'packages':
+            if self._cached_packages is None:
+                self._cached_packages = []
+                for pkg, fns in self._recipecache.packages.items():
+                    if self.fn in fns:
+                        self._cached_packages.append(pkg)
+            return self._cached_packages
+        elif name == 'packages_dynamic':
+            if self._cached_packages_dynamic is None:
+                self._cached_packages_dynamic = []
+                for pkg, fns in self._recipecache.packages_dynamic.items():
+                    if self.fn in fns:
+                        self._cached_packages_dynamic.append(pkg)
+            return self._cached_packages_dynamic
+        elif name == 'rprovides':
+            if self._cached_rprovides is None:
+                self._cached_rprovides = []
+                for pkg, fns in self._recipecache.rproviders.items():
+                    if self.fn in fns:
+                        self._cached_rprovides.append(pkg)
+            return self._cached_rprovides
+        else:
+            raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
+    def inherits(self, only_recipe=False):
+        """
+        Get the inherited classes for a recipe. Returns the class names only.
+        Parameters:
+            only_recipe: True to return only the classes inherited by the recipe
+                         itself, False to return all classes inherited within
+                         the context for the recipe (which includes globally
+                         inherited classes).
+        """
+        if only_recipe:
+            global_inherit = [x for x in (self._d.getVar('BBINCLUDED') or '').split() if x.endswith('.bbclass')]
+        else:
+            global_inherit = []
+        for clsfile in self.inherit_files:
+            if only_recipe and clsfile in global_inherit:
+                continue
+            clsname = os.path.splitext(os.path.basename(clsfile))[0]
+            yield clsname
+    def __str__(self):
+        return '%s' % self.pn
+
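As an illustration, reading cached recipe information through this class via the accessors defined further down ('busybox' is a placeholder recipe name; 'tinfoil' is assumed to have been prepared with config_only=False):

    info = tinfoil.get_recipe_info('busybox')
    print('%s %s (from %s)' % (info.pn, info.pv, info.fn))
    print('inherits: %s' % ', '.join(info.inherits(only_recipe=True)))
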
+
 class Tinfoil:
-    def __init__(self, output=sys.stdout, tracking=False):
-        # Needed to avoid deprecation warnings with python 2.6
-        warnings.filterwarnings("ignore", category=DeprecationWarning)
+    """
+    Tinfoil - an API for scripts and utilities to query
+    BitBake internals and perform build operations.
+    """
 
-        # Set up logging
+    def __init__(self, output=sys.stdout, tracking=False, setup_logging=True):
+        """
+        Create a new tinfoil object.
+        Parameters:
+            output: specifies where console output should be sent. Defaults
+                    to sys.stdout.
+            tracking: True to enable variable history tracking, False to
+                    disable it (default). Enabling this has a minor
+                    performance impact so typically it isn't enabled
+                    unless you need to query variable history.
+            setup_logging: True to setup a logger so that things like
+                    bb.warn() will work immediately and timeout warnings
+                    are visible; False to let BitBake do this itself.
+        """
         self.logger = logging.getLogger('BitBake')
-        self._log_hdlr = logging.StreamHandler(output)
-        bb.msg.addDefaultlogFilter(self._log_hdlr)
-        format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
-        if output.isatty():
-            format.enable_color()
-        self._log_hdlr.setFormatter(format)
-        self.logger.addHandler(self._log_hdlr)
-
-        self.config = CookerConfiguration()
-        configparams = TinfoilConfigParameters(parse_only=True)
-        self.config.setConfigParameters(configparams)
-        self.config.setServerRegIdleCallback(self.register_idle_function)
-        features = []
-        if tracking:
-            features.append(CookerFeatures.BASEDATASTORE_TRACKING)
-        self.cooker = BBCooker(self.config, features)
-        self.config_data = self.cooker.data
-        bb.providers.logger.setLevel(logging.ERROR)
-        self.cooker_data = None
-
-    def register_idle_function(self, function, data):
-        pass
+        self.config_data = None
+        self.cooker = None
+        self.tracking = tracking
+        self.ui_module = None
+        self.server_connection = None
+        self.recipes_parsed = False
+        self.quiet = 0
+        self.oldhandlers = self.logger.handlers[:]
+        if setup_logging:
+            # This is the *client-side* logger, nothing to do with
+            # logging messages from the server
+            bb.msg.logger_create('BitBake', output)
+            self.localhandlers = []
+            for handler in self.logger.handlers:
+                if handler not in self.oldhandlers:
+                    self.localhandlers.append(handler)
 
     def __enter__(self):
         return self
@@ -65,30 +338,290 @@ class Tinfoil:
     def __exit__(self, type, value, traceback):
         self.shutdown()
 
-    def parseRecipes(self):
-        sys.stderr.write("Parsing recipes..")
-        self.logger.setLevel(logging.WARNING)
+    def prepare(self, config_only=False, config_params=None, quiet=0, extra_features=None):
+        """
+        Prepares the underlying BitBake system to be used via tinfoil.
+        This function must be called prior to calling any of the other
+        functions in the API.
+        NOTE: if you call prepare() you must absolutely call shutdown()
+        before your code terminates. You can use a "with" block to ensure
+        this happens e.g.
 
-        try:
-            while self.cooker.state in (state.initial, state.parsing):
-                self.cooker.updateCache()
-        except KeyboardInterrupt:
-            self.cooker.shutdown()
-            self.cooker.updateCache()
-            sys.exit(2)
+            with bb.tinfoil.Tinfoil() as tinfoil:
+                tinfoil.prepare()
+                ...
 
-        self.logger.setLevel(logging.INFO)
-        sys.stderr.write("done.\n")
+        Parameters:
+            config_only: True to read only the configuration and not load
+                        the cache / parse recipes. This is useful if you just
+                        want to query the value of a variable at the global
+                        level or you want to do anything else that doesn't
+                        involve knowing anything about the recipes in the
+                        current configuration. False loads the cache / parses
+                        recipes.
+            config_params: optionally specify your own configuration
+                        parameters. If not specified an instance of
+                        TinfoilConfigParameters will be created internally.
+            quiet:      quiet level controlling console output - equivalent
+                        to bitbake's -q/--quiet option. Default of 0 gives
+                        the same output level as normal bitbake execution.
+            extra_features: extra features to be added to the feature
+                        set requested from the server. See
+                        CookerFeatures._feature_list for possible
+                        features.
+        """
+        self.quiet = quiet
+
+        if self.tracking:
+            extrafeatures = [bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING]
+        else:
+            extrafeatures = []
 
-        self.cooker_data = self.cooker.recipecaches['']
+        if extra_features:
+            extrafeatures += extra_features
 
-    def prepare(self, config_only = False):
-        if not self.cooker_data:
+        if not config_params:
+            config_params = TinfoilConfigParameters(config_only=config_only, quiet=quiet)
+
+        cookerconfig = CookerConfiguration()
+        cookerconfig.setConfigParameters(config_params)
+
+        if not config_only:
+            # Disable local loggers because the UI module is going to set up its own
+            for handler in self.localhandlers:
+                self.logger.handlers.remove(handler)
+            self.localhandlers = []
+
+        self.server_connection, ui_module = setup_bitbake(config_params,
+                            cookerconfig,
+                            extrafeatures)
+
+        self.ui_module = ui_module
+
+        # Ensure the path to bitbake's bin directory is in PATH so that things like
+        # bitbake-worker can be run (usually this is the case, but it doesn't have to be)
+        path = os.getenv('PATH').split(':')
+        bitbakebinpath = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'bin'))
+        for entry in path:
+            if entry.endswith(os.sep):
+                entry = entry[:-1]
+            if os.path.abspath(entry) == bitbakebinpath:
+                break
+        else:
+            path.insert(0, bitbakebinpath)
+            os.environ['PATH'] = ':'.join(path)
+
+        if self.server_connection:
+            _server_connections.append(self.server_connection)
             if config_only:
-                self.cooker.parseConfiguration()
-                self.cooker_data = self.cooker.recipecaches['']
+                config_params.updateToServer(self.server_connection.connection, os.environ.copy())
+                self.run_command('parseConfiguration')
             else:
-                self.parseRecipes()
+                self.run_actions(config_params)
+                self.recipes_parsed = True
+
+            self.config_data = bb.data.init()
+            connector = TinfoilDataStoreConnector(self, None)
+            self.config_data.setVar('_remote_data', connector)
+            self.cooker = TinfoilCookerAdapter(self)
+            self.cooker_data = self.cooker.recipecaches['']
+        else:
+            raise Exception('Failed to start bitbake server')
+
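A minimal config-only session, sketched against the prepare() API above (the queried variable is just an example):

    import bb.tinfoil

    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=True)  # read configuration, skip recipe parsing
        print(tinfoil.config_data.getVar('BBPATH'))
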
+    def run_actions(self, config_params):
+        """
+        Run the actions specified in config_params through the UI.
+        """
+        ret = self.ui_module.main(self.server_connection.connection, self.server_connection.events, config_params)
+        if ret:
+            raise TinfoilUIException(ret)
+
+    def parseRecipes(self):
+        """
+        Legacy function - use parse_recipes() instead.
+        """
+        self.parse_recipes()
+
+    def parse_recipes(self):
+        """
+        Load information on all recipes. Normally you should specify
+        config_only=False when calling prepare() instead of using this
+        function; this function is designed for situations where you need
+        to initialise Tinfoil and use it with config_only=True first and
+        then conditionally call this function to parse recipes later.
+        """
+        config_params = TinfoilConfigParameters(config_only=False)
+        self.run_actions(config_params)
+        self.recipes_parsed = True
+
+    def run_command(self, command, *params):
+        """
+        Run a command on the server (as implemented in bb.command).
+        Note that there are two types of command - synchronous and
+        asynchronous; in order to receive the results of asynchronous
+        commands you will need to set an appropriate event mask
+        using set_event_mask() and listen for the result using
+        wait_event() - with the correct event mask you'll at least get
+        bb.command.CommandCompleted and possibly other events before
+        that depending on the command.
+        """
+        if not self.server_connection:
+            raise Exception('Not connected to server (did you call .prepare()?)')
+
+        commandline = [command]
+        if params:
+            commandline.extend(params)
+        result = self.server_connection.connection.runCommand(commandline)
+        if result[1]:
+            raise TinfoilCommandFailed(result[1])
+        return result[0]
+
+    def set_event_mask(self, eventlist):
+        """Set the event mask which will be applied within wait_event()"""
+        if not self.server_connection:
+            raise Exception('Not connected to server (did you call .prepare()?)')
+        llevel, debug_domains = bb.msg.constructLogOptions()
+        ret = self.run_command('setEventMask', self.server_connection.connection.getEventHandle(), llevel, debug_domains, eventlist)
+        if not ret:
+            raise Exception('setEventMask failed')
+
+    def wait_event(self, timeout=0):
+        """
+        Wait for an event from the server for the specified time.
+        A timeout of 0 means don't wait if there are no events in the queue.
+        Returns the next event in the queue or None if the timeout was
+        reached. Note that in order to receive any events you will
+        first need to set the internal event mask using set_event_mask()
+        (otherwise whatever event mask the UI set up will be in effect).
+        """
+        if not self.server_connection:
+            raise Exception('Not connected to server (did you call .prepare()?)')
+        return self.server_connection.events.waitEvent(timeout)
+
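Taken together, run_command(), set_event_mask() and wait_event() support asynchronous commands. A sketch follows; the command name and event list follow bb.command/bb.event conventions but are assumptions here, not taken from this diff:

    import bb.command

    # 'tinfoil' is assumed prepared; 'buildTargets' is an asynchronous command
    tinfoil.set_event_mask(['bb.command.CommandCompleted',
                            'bb.command.CommandFailed'])
    tinfoil.run_command('buildTargets', ['quilt-native'], 'build')
    while True:
        event = tinfoil.wait_event(0.25)
        if isinstance(event, bb.command.CommandCompleted):
            break
        if isinstance(event, bb.command.CommandFailed):
            raise Exception('command failed: %s' % event.error)
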
+    def get_overlayed_recipes(self):
+        """
+        Find recipes which are overlayed (i.e. where recipes exist in multiple layers)
+        """
+        return defaultdict(list, self.run_command('getOverlayedRecipes'))
+
+    def get_skipped_recipes(self):
+        """
+        Find recipes which were skipped (i.e. SkipRecipe was raised
+        during parsing).
+        """
+        return OrderedDict(self.run_command('getSkippedRecipes'))
+
+    def get_all_providers(self):
+        return defaultdict(list, self.run_command('allProviders'))
+
+    def find_providers(self):
+        return self.run_command('findProviders')
+
+    def find_best_provider(self, pn):
+        return self.run_command('findBestProvider', pn)
+
+    def get_runtime_providers(self, rdep):
+        return self.run_command('getRuntimeProviders', rdep)
+
+    def get_recipe_file(self, pn):
+        """
+        Get the file name for the specified recipe/target. Raises
+        bb.providers.NoProvider if there is no match or the recipe was
+        skipped.
+        """
+        best = self.find_best_provider(pn)
+        if not best or (len(best) > 3 and not best[3]):
+            skiplist = self.get_skipped_recipes()
+            taskdata = bb.taskdata.TaskData(None, skiplist=skiplist)
+            skipreasons = taskdata.get_reasons(pn)
+            if skipreasons:
+                raise bb.providers.NoProvider('%s is unavailable:\n  %s' % (pn, '\n  '.join(skipreasons)))
+            else:
+                raise bb.providers.NoProvider('Unable to find any recipe file matching "%s"' % pn)
+        return best[3]
+
+    def get_file_appends(self, fn):
+        """
+        Find the bbappends for a recipe file
+        """
+        return self.run_command('getFileAppends', fn)
+
+    def all_recipes(self, mc='', sort=True):
+        """
+        Enable iterating over all recipes in the current configuration.
+        Returns an iterator over TinfoilRecipeInfo objects created on demand.
+        Parameters:
+            mc: The multiconfig, default of '' uses the main configuration.
+            sort: True to sort recipes alphabetically (default), False otherwise
+        """
+        recipecache = self.cooker.recipecaches[mc]
+        if sort:
+            recipes = sorted(recipecache.pkg_pn.items())
+        else:
+            recipes = recipecache.pkg_pn.items()
+        for pn, fns in recipes:
+            prov = self.find_best_provider(pn)
+            recipe = TinfoilRecipeInfo(recipecache,
+                                       self.config_data,
+                                       pn=pn,
+                                       fn=prov[3],
+                                       fns=fns)
+            yield recipe
+
+    def all_recipe_files(self, mc='', variants=True, preferred_only=False):
+        """
+        Enable iterating over all recipe files in the current configuration.
+        Returns an iterator over file paths.
+        Parameters:
+            mc: The multiconfig, default of '' uses the main configuration.
+            variants: True to include variants of recipes created through
+                      BBCLASSEXTEND (default) or False to exclude them
+            preferred_only: True to include only the preferred recipe where
+                      multiple exist providing the same PN, False to list
+                      all recipes
+        """
+        recipecache = self.cooker.recipecaches[mc]
+        if preferred_only:
+            files = []
+            for pn in recipecache.pkg_pn.keys():
+                prov = self.find_best_provider(pn)
+                files.append(prov[3])
+        else:
+            files = recipecache.pkg_fn.keys()
+        for fn in sorted(files):
+            if not variants and fn.startswith('virtual:'):
+                continue
+            yield fn
+
+
+    def get_recipe_info(self, pn, mc=''):
+        """
+        Get information on a specific recipe in the current configuration by name (PN).
+        Returns a TinfoilRecipeInfo object created on demand.
+        Parameters:
+            pn: Recipe name (PN) to look up.
+            mc: The multiconfig, default of '' uses the main configuration.
+        """
+        recipecache = self.cooker.recipecaches[mc]
+        prov = self.find_best_provider(pn)
+        fn = prov[3]
+        if fn:
+            actual_pn = recipecache.pkg_fn[fn]
+            recipe = TinfoilRecipeInfo(recipecache,
+                                        self.config_data,
+                                        pn=actual_pn,
+                                        fn=fn,
+                                        fns=recipecache.pkg_pn[actual_pn])
+            return recipe
+        else:
+            return None
+
+    def parse_recipe(self, pn):
+        """
+        Parse the specified recipe and return a datastore object
+        representing the environment for the recipe.
+        """
+        fn = self.get_recipe_file(pn)
+        return self.parse_recipe_file(fn)
 
     def parse_recipe_file(self, fn, appends=True, appendlist=None, config_data=None):
         """
@@ -105,43 +638,263 @@ class Tinfoil:
                          specify config_data then you cannot use a virtual
                          specification for fn.
         """
-        if appends and appendlist == []:
-            appends = False
-        if appends:
-            if appendlist:
-                appendfiles = appendlist
+        if self.tracking:
+            # Enable history tracking just for the parse operation
+            self.run_command('enableDataTracking')
+        try:
+            if appends and appendlist == []:
+                appends = False
+            if config_data:
+                dctr = bb.remotedata.RemoteDatastores.transmit_datastore(config_data)
+                dscon = self.run_command('parseRecipeFile', fn, appends, appendlist, dctr)
             else:
-                if not hasattr(self.cooker, 'collection'):
-                    raise Exception('You must call tinfoil.prepare() with config_only=False in order to get bbappends')
-                appendfiles = self.cooker.collection.get_file_appends(fn)
-        else:
-            appendfiles = None
-        if config_data:
-            # We have to use a different function here if we're passing in a datastore
-            localdata = bb.data.createCopy(config_data)
-            envdata = bb.cache.parse_recipe(localdata, fn, appendfiles)['']
+                dscon = self.run_command('parseRecipeFile', fn, appends, appendlist)
+            if dscon:
+                return self._reconvert_type(dscon, 'DataStoreConnectionHandle')
+            else:
+                return None
+        finally:
+            if self.tracking:
+                self.run_command('disableDataTracking')
+
+    def build_file(self, buildfile, task, internal=True):
+        """
+        Runs the specified task for just a single recipe (i.e. no dependencies).
+        This is equivalent to bitbake -b, except that with the default
+        internal=True no dependency warning is produced, normal info
+        messages from the runqueue are silenced, and the BuildInit,
+        BuildStarted and BuildCompleted events are not fired.
+        """
+        return self.run_command('buildFile', buildfile, task, internal)
+
+    def build_targets(self, targets, task=None, handle_events=True, extra_events=None, event_callback=None):
+        """
+        Builds the specified targets. This is equivalent to a normal invocation
+        of bitbake. Has built-in event handling which is enabled by default and
+        can be extended if needed.
+        Parameters:
+            targets:
+                One or more targets to build. Can be a list or a
+                space-separated string.
+            task:
+                The task to run; if None then the value of BB_DEFAULT_TASK
+                will be used. Default None.
+            handle_events:
+                True to handle events in a similar way to normal bitbake
+                invocation with knotty; False to return immediately (on the
+                assumption that the caller will handle the events instead).
+                Default True.
+            extra_events:
+                An optional list of events to add to the event mask (if
+                handle_events=True). If you add events here you also need
+                to specify a callback function in event_callback that will
+                handle the additional events. Default None.
+            event_callback:
+                An optional function taking a single parameter which
+                will be called first upon receiving any event (if
+                handle_events=True) so that the caller can override or
+                extend the event handling. Default None.
+        """
+        if isinstance(targets, str):
+            targets = targets.split()
+        if not task:
+            task = self.config_data.getVar('BB_DEFAULT_TASK')
+
+        if handle_events:
+            # A reasonable set of default events matching up with those we handle below
+            eventmask = [
+                        'bb.event.BuildStarted',
+                        'bb.event.BuildCompleted',
+                        'logging.LogRecord',
+                        'bb.event.NoProvider',
+                        'bb.command.CommandCompleted',
+                        'bb.command.CommandFailed',
+                        'bb.build.TaskStarted',
+                        'bb.build.TaskFailed',
+                        'bb.build.TaskSucceeded',
+                        'bb.build.TaskFailedSilent',
+                        'bb.build.TaskProgress',
+                        'bb.runqueue.runQueueTaskStarted',
+                        'bb.runqueue.sceneQueueTaskStarted',
+                        'bb.event.ProcessStarted',
+                        'bb.event.ProcessProgress',
+                        'bb.event.ProcessFinished',
+                        ]
+            if extra_events:
+                eventmask.extend(extra_events)
+            ret = self.set_event_mask(eventmask)
+
+        includelogs = self.config_data.getVar('BBINCLUDELOGS')
+        loglines = self.config_data.getVar('BBINCLUDELOGS_LINES')
+
+        ret = self.run_command('buildTargets', targets, task)
+        if handle_events:
+            result = False
+            # Borrowed from knotty; somewhat hackily we use the helper
+            # as the object on which to store "shutdown"
+            helper = bb.ui.uihelper.BBUIHelper()
+            # We set up logging optionally in the constructor so now we need to
+            # grab the handlers to pass to TerminalFilter
+            console = None
+            errconsole = None
+            for handler in self.logger.handlers:
+                if isinstance(handler, logging.StreamHandler):
+                    if handler.stream == sys.stdout:
+                        console = handler
+                    elif handler.stream == sys.stderr:
+                        errconsole = handler
+            format_str = "%(levelname)s: %(message)s"
+            format = bb.msg.BBLogFormatter(format_str)
+            helper.shutdown = 0
+            parseprogress = None
+            termfilter = bb.ui.knotty.TerminalFilter(helper, helper, console, errconsole, format, quiet=self.quiet)
+            try:
+                while True:
+                    try:
+                        event = self.wait_event(0.25)
+                        if event:
+                            if event_callback and event_callback(event):
+                                continue
+                            if helper.eventHandler(event):
+                                if isinstance(event, bb.build.TaskFailedSilent):
+                                    logger.warning("Logfile for failed setscene task is %s" % event.logfile)
+                                elif isinstance(event, bb.build.TaskFailed):
+                                    bb.ui.knotty.print_event_log(event, includelogs, loglines, termfilter)
+                                continue
+                            if isinstance(event, bb.event.ProcessStarted):
+                                if self.quiet > 1:
+                                    continue
+                                parseprogress = bb.ui.knotty.new_progress(event.processname, event.total)
+                                parseprogress.start(False)
+                                continue
+                            if isinstance(event, bb.event.ProcessProgress):
+                                if self.quiet > 1:
+                                    continue
+                                if parseprogress:
+                                    parseprogress.update(event.progress)
+                                else:
+                                    bb.warn("Got ProcessProgress event for someting that never started?")
+                                continue
+                            if isinstance(event, bb.event.ProcessFinished):
+                                if self.quiet > 1:
+                                    continue
+                                if parseprogress:
+                                    parseprogress.finish()
+                                parseprogress = None
+                                continue
+                            if isinstance(event, bb.command.CommandCompleted):
+                                result = True
+                                break
+                            if isinstance(event, bb.command.CommandFailed):
+                                self.logger.error(str(event))
+                                result = False
+                                break
+                            if isinstance(event, logging.LogRecord):
+                                if event.taskpid == 0 or event.levelno > logging.INFO:
+                                    self.logger.handle(event)
+                                continue
+                            if isinstance(event, bb.event.NoProvider):
+                                self.logger.error(str(event))
+                                result = False
+                                break
+
+                        elif helper.shutdown > 1:
+                            break
+                        termfilter.updateFooter()
+                    except KeyboardInterrupt:
+                        termfilter.clearFooter()
+                        if helper.shutdown == 1:
+                            print("\nSecond Keyboard Interrupt, stopping...\n")
+                            ret = self.run_command("stateForceShutdown")
+                            if ret and ret[2]:
+                                self.logger.error("Unable to cleanly stop: %s" % ret[2])
+                        elif helper.shutdown == 0:
+                            print("\nKeyboard Interrupt, closing down...\n")
+                            interrupted = True
+                            ret = self.run_command("stateShutdown")
+                            if ret and ret[2]:
+                                self.logger.error("Unable to cleanly shutdown: %s" % ret[2])
+                        helper.shutdown = helper.shutdown + 1
+                termfilter.clearFooter()
+            finally:
+                termfilter.finish()
+            if helper.failed_tasks:
+                result = False
+            return result
         else:
-            # Use the standard path
-            parser = bb.cache.NoCache(self.cooker.databuilder)
-            envdata = parser.loadDataFull(fn, appendfiles)
-        return envdata
+            return ret
 
     def shutdown(self):
-        self.cooker.shutdown(force=True)
-        self.cooker.post_serve()
-        self.cooker.unlockBitbake()
-        self.logger.removeHandler(self._log_hdlr)
+        """
+        Shut down tinfoil. Disconnects from the server and gracefully
+        releases any associated resources. You must call this function
+        if prepare() has been called, or use a with... block when
+        creating the tinfoil object so that shutdown() is called
+        automatically.
+        """
+        if self.server_connection:
+            self.run_command('clientComplete')
+            _server_connections.remove(self.server_connection)
+            bb.event.ui_queue = []
+            self.server_connection.terminate()
+            self.server_connection = None
+
+        # Restore the logging handlers to how they looked when we started
+        if self.oldhandlers:
+            # Iterate over a copy since we remove entries as we go
+            for handler in list(self.logger.handlers):
+                if handler not in self.oldhandlers:
+                    self.logger.handlers.remove(handler)
+
+    def _reconvert_type(self, obj, origtypename):
+        """
+        Convert an object back to the right type, in the case
+        that marshalling has changed it (especially with xmlrpc)
+        """
+        supported_types = {
+            'set': set,
+            'DataStoreConnectionHandle': bb.command.DataStoreConnectionHandle,
+        }
+
+        origtype = supported_types.get(origtypename, None)
+        if origtype is None:
+            raise Exception('Unsupported type "%s"' % origtypename)
+        if type(obj) == origtype:
+            newobj = obj
+        elif isinstance(obj, dict):
+            # The object was marshalled to a dict (e.g. over xmlrpc);
+            # rebuild it attribute by attribute
+            newobj = origtype()
+            for k, v in obj.items():
+                setattr(newobj, k, v)
+        else:
+            # Assume we can coerce the type
+            newobj = origtype(obj)
+
+        if isinstance(newobj, bb.command.DataStoreConnectionHandle):
+            connector = TinfoilDataStoreConnector(self, newobj.dsindex)
+            newobj = bb.data.init()
+            newobj.setVar('_remote_data', connector)
 
-class TinfoilConfigParameters(ConfigParameters):
+        return newobj
 
-    def __init__(self, **options):
+
+class TinfoilConfigParameters(BitBakeConfigParameters):
+
+    def __init__(self, config_only, **options):
         self.initial_options = options
-        super(TinfoilConfigParameters, self).__init__()
+        # Apply some sane defaults
+        if 'parse_only' not in options:
+            self.initial_options['parse_only'] = not config_only
+        #if 'status_only' not in options:
+        #    self.initial_options['status_only'] = config_only
+        if 'ui' not in options:
+            self.initial_options['ui'] = 'knotty'
+        if 'argv' not in options:
+            self.initial_options['argv'] = []
 
-    def parseCommandLine(self, argv=sys.argv):
-        class DummyOptions:
-            def __init__(self, initial_options):
-                for key, val in initial_options.items():
-                    setattr(self, key, val)
+        super(TinfoilConfigParameters, self).__init__()
 
-        return DummyOptions(self.initial_options), None
+    def parseCommandLine(self, argv=None):
+        # We don't want any parameters parsed from the command line
+        opts = super(TinfoilConfigParameters, self).parseCommandLine([])
+        for key, val in self.initial_options.items():
+            setattr(opts[0], key, val)
+        return opts

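The reworked tinfoil above amounts to a small client API: prepare() connects to the server, parse_recipe() and all_recipes() query recipe data, build_targets() drives builds with knotty-style event handling, and shutdown() (or a with-block) releases the connection. A minimal sketch of driving it, assuming a configured build environment; 'busybox' is an arbitrary example target, and the loop assumes TinfoilRecipeInfo exposes the pn it is created with:

import bb.tinfoil

# The with-block guarantees shutdown() runs once prepare() has been
# called, as the shutdown() docstring above requires
with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare()

    # Parse one recipe; the returned datastore is a remote datastore
    # handle reconstructed by _reconvert_type() above
    d = tinfoil.parse_recipe('busybox')
    print(d.getVar('PV'))

    # Iterate over every recipe in the main configuration
    for recipe in tinfoil.all_recipes():
        print(recipe.pn)

    # Build a target with the default built-in event handling
    tinfoil.build_targets('busybox')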
+ 122 - 54
bitbake/lib/bb/ui/buildinfohelper.py

@@ -42,10 +42,12 @@ from orm.models import Variable, VariableHistory
 from orm.models import Package, Package_File, Target_Installed_Package, Target_File
 from orm.models import Task_Dependency, Package_Dependency
 from orm.models import Recipe_Dependency, Provides
-from orm.models import Project, CustomImagePackage, CustomImageRecipe
+from orm.models import Project, CustomImagePackage
 from orm.models import signal_runbuilds
 
 from bldcontrol.models import BuildEnvironment, BuildRequest
+from bldcontrol.models import BRLayer
+from bldcontrol import bbcontroller
 
 from bb.msg import BBLogFormatter as formatter
 from django.db import models
@@ -361,11 +363,6 @@ class ORMWrapper(object):
 
     def get_update_layer_version_object(self, build_obj, layer_obj, layer_version_information):
         if isinstance(layer_obj, Layer_Version):
-            # Special case the toaster-custom-images layer which is created
-            # on the fly so don't update the values which may cause the layer
-            # to be duplicated on a future get_or_create
-            if layer_obj.layer.name == CustomImageRecipe.LAYER_NAME:
-                return layer_obj
             # We already found our layer version for this build so just
             # update it with the new build information
             logger.debug("We found our layer from toaster")
@@ -384,8 +381,8 @@ class ORMWrapper(object):
                 local_path=layer_version_information['local_path'],
             )
 
-            logger.info("created new historical layer version %d",
-                        layer_copy.pk)
+            logger.debug("Created new layer version %s for build history",
+                         layer_copy.layer.name)
 
             self.layer_version_built.append(layer_copy)
 
@@ -441,48 +438,33 @@ class ORMWrapper(object):
         else:
             br_id, be_id = brbe.split(":")
 
-            # find layer by checkout path;
-            from bldcontrol import bbcontroller
-            bc = bbcontroller.getBuildEnvironmentController(pk = be_id)
-
-            # we might have a race condition here, as the project layers may change between the build trigger and the actual build execution
-            # but we can only match on the layer name, so the worst thing can happen is a mis-identification of the layer, not a total failure
-
-            # note that this is different
-            buildrequest = BuildRequest.objects.get(pk = br_id)
-            for brl in buildrequest.brlayer_set.all():
-                if brl.local_source_dir:
-                    localdirname = os.path.join(brl.local_source_dir,
-                                                brl.dirpath)
-                else:
-                    localdirname = os.path.join(bc.getGitCloneDirectory(brl.giturl, brl.commit), brl.dirpath)
-                # we get a relative path, unless running in HEAD mode where the path is absolute
-                if not localdirname.startswith("/"):
-                    localdirname = os.path.join(bc.be.sourcedir, localdirname)
-                #logger.debug(1, "Localdirname %s lcal_path %s" % (localdirname, layer_information['local_path']))
-                if localdirname.startswith(layer_information['local_path']):
-                  # If the build request came from toaster this field
-                  # should contain the information from the layer_version
-                  # That created this build request.
-                    if brl.layer_version:
-                        return brl.layer_version
-
-                # This might be a local layer (i.e. no git info) so try
-                # matching local_source_dir
-                if brl.local_source_dir and brl.local_source_dir == layer_information["local_path"]:
-                    return brl.layer_version
-
-                    # we matched the BRLayer, but we need the layer_version that generated this BR; reverse of the Project.schedule_build()
-                    #logger.debug(1, "Matched %s to BRlayer %s" % (pformat(layer_information["local_path"]), localdirname))
-
-                    for pl in buildrequest.project.projectlayer_set.filter(layercommit__layer__name = brl.name):
-                        if pl.layercommit.layer.vcs_url == brl.giturl :
-                            layer = pl.layercommit.layer
-                            layer.save()
-                            return layer
-
-            raise NotExisting("Unidentified layer %s" % pformat(layer_information))
+            # Find the layer version by matching the layer event information
+            # against the metadata we have in Toaster
 
+            try:
+                br_layer = BRLayer.objects.get(req=br_id,
+                                               name=layer_information['name'])
+                return br_layer.layer_version
+            except (BRLayer.MultipleObjectsReturned, BRLayer.DoesNotExist):
+                # There are multiple of the same layer name or the name
+                # hasn't been determined by the toaster.bbclass layer
+                # so let's filter by the local_path
+                bc = bbcontroller.getBuildEnvironmentController(pk=be_id)
+                for br_layer in BRLayer.objects.filter(req=br_id):
+                    if br_layer.giturl and \
+                       layer_information['local_path'].endswith(
+                           bc.getGitCloneDirectory(br_layer.giturl,
+                                                   br_layer.commit)):
+                            return br_layer.layer_version
+
+                    if br_layer.local_source_dir == \
+                            layer_information['local_path']:
+                        return br_layer.layer_version
+
+        # We've reached the end of our search and couldn't find the layer;
+        # the caller can continue, but some data may be missing
+        raise NotExisting("Unidentified layer %s" %
+                          pformat(layer_information))
 
     def save_target_file_information(self, build_obj, target_obj, filedata):
         assert isinstance(build_obj, Build)
@@ -737,7 +719,11 @@ class ORMWrapper(object):
 
     def save_build_package_information(self, build_obj, package_info, recipes,
                                        built_package):
-       # assert isinstance(build_obj, Build)
+        # assert isinstance(build_obj, Build)
+
+        if 'PN' not in package_info:
+            # no package data to save (e.g. 'OPKGN'="lib64-*"|"lib32-*")
+            return None
 
         # create and save the object
         pname = package_info['PKG']
@@ -876,6 +862,12 @@ class MockEvent(object):
         self.pathname = None
         self.lineno = None
 
+    def getMessage(self):
+        """
+        Simulate LogRecord message return
+        """
+        return self.msg
+
 
 class BuildInfoHelper(object):
     """ This class gathers the build information from the server and sends it
@@ -982,6 +974,44 @@ class BuildInfoHelper(object):
             pass
         return task_information
 
+    def _get_layer_version_for_dependency(self, pathRE):
+        """ Returns the layer in the toaster db that has a full regex
+        match to the pathRE. pathRE - the layer path passed as a regex in the
+        event. It is created in cooker.py as a collection for the layer
+        priorities.
+        """
+        self._ensure_build()
+
+        def _sort_longest_path(layer_version):
+            assert isinstance(layer_version, Layer_Version)
+            return len(layer_version.local_path)
+
+        # Our paths don't append a trailing slash
+        if pathRE.endswith("/"):
+            pathRE = pathRE[:-1]
+
+        p = re.compile(pathRE)
+        path = re.sub(r'[$^]', r'', pathRE)
+        # Heuristics: we always match recipe to the deepest layer path in
+        # the discovered layers
+        for lvo in sorted(self.orm_wrapper.layer_version_objects,
+                          reverse=True, key=_sort_longest_path):
+            if p.fullmatch(os.path.abspath(lvo.local_path)):
+                return lvo
+            if lvo.layer.local_source_dir:
+                if p.fullmatch(os.path.abspath(lvo.layer.local_source_dir)):
+                    return lvo
+            if path.startswith(lvo.local_path):
+                # sub-layer path inside existing layer
+                return lvo
+
+        # if we get here, we didn't read layers correctly;
+        # dump whatever information we have on the error log
+        logger.warning("Could not match layer dependency for path %s : %s",
+                       pathRE,
+                       self.orm_wrapper.layer_version_objects)
+        return None
+
     def _get_layer_version_for_path(self, path):
         self._ensure_build()
 
@@ -1243,6 +1273,14 @@ class BuildInfoHelper(object):
                 candidates = [x for x in self.internal_state['taskdata'].keys() if x.endswith(identifier)]
                 if len(candidates) == 1:
                     identifier = candidates[0]
+                elif len(candidates) > 1 and hasattr(event,'_package'):
+                    if 'native-' in event._package:
+                        identifier = 'native:' + identifier
+                    if 'nativesdk-' in event._package:
+                        identifier = 'nativesdk:' + identifier
+                    candidates = [x for x in self.internal_state['taskdata'].keys() if x.endswith(identifier)]
+                    if len(candidates) == 1:
+                        identifier = candidates[0]
 
         assert identifier in self.internal_state['taskdata']
         identifierlist = identifier.split(":")
@@ -1372,10 +1410,10 @@ class BuildInfoHelper(object):
         if 'layer-priorities' in event._depgraph.keys():
             for lv in event._depgraph['layer-priorities']:
                 (_, path, _, priority) = lv
-                layer_version_obj = self._get_layer_version_for_path(path[1:]) # paths start with a ^
-                assert layer_version_obj is not None
-                layer_version_obj.priority = priority
-                layer_version_obj.save()
+                layer_version_obj = self._get_layer_version_for_dependency(path)
+                if layer_version_obj:
+                    layer_version_obj.priority = priority
+                    layer_version_obj.save()
 
         # save recipe information
         self.internal_state['recipes'] = {}
@@ -1640,6 +1678,36 @@ class BuildInfoHelper(object):
                 break
         return endswith
 
+    def scan_task_artifacts(self, event):
+        """
+        The 'TaskArtifacts' event passes the manifest file content for the
+        tasks 'do_deploy', 'do_image_complete', 'do_populate_sdk', and
+        'do_populate_sdk_ext'. The first two will be implemented later.
+        """
+        task_vars = BuildInfoHelper._get_data_from_event(event)
+        task_name = task_vars['task'][task_vars['task'].find(':')+1:]
+        task_artifacts = task_vars['artifacts']
+
+        if task_name in ['do_populate_sdk', 'do_populate_sdk_ext']:
+            targets = [target for target in self.internal_state['targets'] \
+                if target.task == task_name[3:]]
+            if not targets:
+                logger.warning("scan_task_artifacts: SDK targets not found: %s\n", task_name)
+                return
+            for artifact_path in task_artifacts:
+                if not os.path.isfile(artifact_path):
+                    logger.warning("scan_task_artifacts: artifact file not found: %s\n", artifact_path)
+                    continue
+                for target in targets:
+                    # don't record the file if it's already been added
+                    # to this target
+                    matching_files = TargetSDKFile.objects.filter(
+                        target=target, file_name=artifact_path)
+                    if matching_files.count() == 0:
+                        artifact_size = os.stat(artifact_path).st_size
+                        self.orm_wrapper.save_target_sdk_file(
+                            target, artifact_path, artifact_size)
+
     def _get_image_files(self, deploy_dir_image, image_name, image_file_extensions):
         """
         Find files in deploy_dir_image whose basename starts with the

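_get_layer_version_for_dependency() above resolves a layer regex by preferring the deepest matching path. A standalone sketch of that heuristic, with a hypothetical helper name and example paths:

import os
import re

def match_layer_path(pathRE, layer_paths):
    # Stored layer paths carry no trailing slash, so drop it from the
    # regex as the method above does
    if pathRE.endswith("/"):
        pathRE = pathRE[:-1]
    p = re.compile(pathRE)
    # Sort by length, longest first, so a sub-layer wins over the
    # layer directory that contains it
    for candidate in sorted(layer_paths, key=len, reverse=True):
        if p.fullmatch(os.path.abspath(candidate)):
            return candidate
    return None

print(match_layer_path('^/srv/layers/meta-example$',
                       ['/srv/layers', '/srv/layers/meta-example']))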
+ 91 - 81
bitbake/lib/bb/ui/knotty.py

@@ -32,6 +32,7 @@ import fcntl
 import struct
 import copy
 import atexit
+
 from bb.ui import uihelper
 
 featureSet = [bb.cooker.CookerFeatures.SEND_SANITYEVENTS]
@@ -40,7 +41,7 @@ logger = logging.getLogger("BitBake")
 interactive = sys.stdout.isatty()
 
 class BBProgress(progressbar.ProgressBar):
-    def __init__(self, msg, maxval, widgets=None, extrapos=-1):
+    def __init__(self, msg, maxval, widgets=None, extrapos=-1, resize_handler=None):
         self.msg = msg
         self.extrapos = extrapos
         if not widgets:
@@ -48,10 +49,10 @@ class BBProgress(progressbar.ProgressBar):
             progressbar.ETA()]
             self.extrapos = 4
 
-        try:
+        if resize_handler:
+            self._resize_default = resize_handler
+        else:
             self._resize_default = signal.getsignal(signal.SIGWINCH)
-        except:
-            self._resize_default = None
         progressbar.ProgressBar.__init__(self, maxval, [self.msg + ": "] + widgets, fd=sys.stdout)
 
     def _handle_resize(self, signum=None, frame=None):
@@ -74,10 +75,8 @@ class BBProgress(progressbar.ProgressBar):
                 extrastr = str(extra)
                 if extrastr[0] != ' ':
                     extrastr = ' ' + extrastr
-                if extrastr[-1] != ' ':
-                    extrastr += ' '
             else:
-                extrastr = ' '
+                extrastr = ''
             self.widgets[self.extrapos] = extrastr
 
     def _need_update(self):
@@ -208,8 +207,10 @@ class TerminalFilter(object):
             self.interactive = False
             bb.note("Unable to use interactive mode for this terminal, using fallback")
             return
-        console.addFilter(InteractConsoleLogFilter(self, format))
-        errconsole.addFilter(InteractConsoleLogFilter(self, format))
+        if console:
+            console.addFilter(InteractConsoleLogFilter(self, format))
+        if errconsole:
+            errconsole.addFilter(InteractConsoleLogFilter(self, format))
 
         self.main_progress = None
 
@@ -247,10 +248,10 @@ class TerminalFilter(object):
                 start_time = activetasks[t].get("starttime", None)
                 if not pbar or pbar.bouncing != (progress < 0):
                     if progress < 0:
-                        pbar = BBProgress("0: %s (pid %s) " % (activetasks[t]["title"], t), 100, widgets=[progressbar.BouncingSlider(), ''], extrapos=2)
+                        pbar = BBProgress("0: %s (pid %s) " % (activetasks[t]["title"], t), 100, widgets=[progressbar.BouncingSlider(), ''], extrapos=2, resize_handler=self.sigwinch_handle)
                         pbar.bouncing = True
                     else:
-                        pbar = BBProgress("0: %s (pid %s) " % (activetasks[t]["title"], t), 100, widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ''], extrapos=4)
+                        pbar = BBProgress("0: %s (pid %s) " % (activetasks[t]["title"], t), 100, widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ''], extrapos=4, resize_handler=self.sigwinch_handle)
                         pbar.bouncing = False
                     activetasks[t]["progressbar"] = pbar
                 tasks.append((pbar, progress, rate, start_time))
@@ -274,7 +275,7 @@ class TerminalFilter(object):
             maxtask = self.helper.tasknumber_total
             if not self.main_progress or self.main_progress.maxval != maxtask:
                 widgets = [' ', progressbar.Percentage(), ' ', progressbar.Bar()]
-                self.main_progress = BBProgress("Running tasks", maxtask, widgets=widgets)
+                self.main_progress = BBProgress("Running tasks", maxtask, widgets=widgets, resize_handler=self.sigwinch_handle)
                 self.main_progress.start(False)
             self.main_progress.setmessage(content)
             progress = self.helper.tasknumber_current - 1
@@ -283,7 +284,7 @@ class TerminalFilter(object):
             content = self.main_progress.update(progress)
             print('')
         lines = 1 + int(len(content) / (self.columns + 1))
-        if not self.quiet:
+        if self.quiet == 0:
             for tasknum, task in enumerate(tasks[:(self.rows - 2)]):
                 if isinstance(task, tuple):
                     pbar, progress, rate, start_time = task
@@ -311,7 +312,33 @@ class TerminalFilter(object):
             fd = sys.stdin.fileno()
             self.termios.tcsetattr(fd, self.termios.TCSADRAIN, self.stdinbackup)
 
-def _log_settings_from_server(server):
+def print_event_log(event, includelogs, loglines, termfilter):
+    # FIXME refactor this out further
+    logfile = event.logfile
+    if logfile and os.path.exists(logfile):
+        termfilter.clearFooter()
+        bb.error("Logfile of failure stored in: %s" % logfile)
+        if includelogs and not event.errprinted:
+            print("Log data follows:")
+            f = open(logfile, "r")
+            lines = []
+            while True:
+                l = f.readline()
+                if l == '':
+                    break
+                l = l.rstrip()
+                if loglines:
+                    lines.append(' | %s' % l)
+                    if len(lines) > int(loglines):
+                        lines.pop(0)
+                else:
+                    print('| %s' % l)
+            f.close()
+            if lines:
+                for line in lines:
+                    print(line)
+
+def _log_settings_from_server(server, observe_only):
     # Get values of variables which control our output
     includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"])
     if error:
@@ -321,7 +348,11 @@ def _log_settings_from_server(server):
     if error:
         logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s" % error)
         raise BaseException(error)
-    consolelogfile, error = server.runCommand(["getSetVariable", "BB_CONSOLELOG"])
+    if observe_only:
+        cmd = 'getVariable'
+    else:
+        cmd = 'getSetVariable'
+    consolelogfile, error = server.runCommand([cmd, "BB_CONSOLELOG"])
     if error:
         logger.error("Unable to get the value of BB_CONSOLELOG variable: %s" % error)
         raise BaseException(error)
@@ -339,7 +370,10 @@ _evt_list = [ "bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.Lo
 
 def main(server, eventHandler, params, tf = TerminalFilter):
 
-    includelogs, loglines, consolelogfile = _log_settings_from_server(server)
+    if not params.observe_only:
+        params.updateToServer(server, os.environ.copy())
+
+    includelogs, loglines, consolelogfile = _log_settings_from_server(server, params.observe_only)
 
     if sys.stdin.isatty() and sys.stdout.isatty():
         log_exec_tty = True
@@ -352,15 +386,19 @@ def main(server, eventHandler, params, tf = TerminalFilter):
     errconsole = logging.StreamHandler(sys.stderr)
     format_str = "%(levelname)s: %(message)s"
     format = bb.msg.BBLogFormatter(format_str)
-    if params.options.quiet:
-        bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut, bb.msg.BBLogFormatter.WARNING)
+    if params.options.quiet == 0:
+        forcelevel = None
+    elif params.options.quiet > 2:
+        forcelevel = bb.msg.BBLogFormatter.ERROR
     else:
-        bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut)
+        forcelevel = bb.msg.BBLogFormatter.WARNING
+    bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut, forcelevel)
     bb.msg.addDefaultlogFilter(errconsole, bb.msg.BBLogFilterStdErr)
     console.setFormatter(format)
     errconsole.setFormatter(format)
-    logger.addHandler(console)
-    logger.addHandler(errconsole)
+    if not bb.msg.has_console_handler(logger):
+        logger.addHandler(console)
+        logger.addHandler(errconsole)
 
     bb.utils.set_process_name("KnottyUI")
 
@@ -389,7 +427,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
     universe = False
     if not params.observe_only:
         params.updateFromServer(server)
-        params.updateToServer(server, os.environ.copy())
         cmdline = params.parseActions()
         if not cmdline:
             print("Nothing to do.  Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
@@ -465,11 +502,11 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                         continue
 
                     # Prefix task messages with recipe/task
-                    if event.taskpid in helper.running_tasks:
+                    if event.taskpid in helper.running_tasks and event.levelno != format.PLAIN:
                         taskinfo = helper.running_tasks[event.taskpid]
                         event.msg = taskinfo['title'] + ': ' + event.msg
                 if hasattr(event, 'fn'):
-                        event.msg = event.fn + ': ' + event.msg
+                    event.msg = event.fn + ': ' + event.msg
                 logger.handle(event)
                 continue
 
@@ -478,62 +515,52 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                 continue
             if isinstance(event, bb.build.TaskFailed):
                 return_value = 1
-                logfile = event.logfile
-                if logfile and os.path.exists(logfile):
-                    termfilter.clearFooter()
-                    bb.error("Logfile of failure stored in: %s" % logfile)
-                    if includelogs and not event.errprinted:
-                        print("Log data follows:")
-                        f = open(logfile, "r")
-                        lines = []
-                        while True:
-                            l = f.readline()
-                            if l == '':
-                                break
-                            l = l.rstrip()
-                            if loglines:
-                                lines.append(' | %s' % l)
-                                if len(lines) > int(loglines):
-                                    lines.pop(0)
-                            else:
-                                print('| %s' % l)
-                        f.close()
-                        if lines:
-                            for line in lines:
-                                print(line)
+                print_event_log(event, includelogs, loglines, termfilter)
             if isinstance(event, bb.build.TaskBase):
                 logger.info(event._message)
                 continue
             if isinstance(event, bb.event.ParseStarted):
+                if params.options.quiet > 1:
+                    continue
                 if event.total == 0:
                     continue
                 parseprogress = new_progress("Parsing recipes", event.total).start()
                 continue
             if isinstance(event, bb.event.ParseProgress):
+                if params.options.quiet > 1:
+                    continue
                 if parseprogress:
                     parseprogress.update(event.current)
                 else:
                     bb.warn("Got ParseProgress event for parsing that never started?")
                 continue
             if isinstance(event, bb.event.ParseCompleted):
+                if params.options.quiet > 1:
+                    continue
                 if not parseprogress:
                     continue
                 parseprogress.finish()
                 parseprogress = None
-                if not params.options.quiet:
+                if params.options.quiet == 0:
                     print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
                         % ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)))
                 continue
 
             if isinstance(event, bb.event.CacheLoadStarted):
+                if params.options.quiet > 1:
+                    continue
                 cacheprogress = new_progress("Loading cache", event.total).start()
                 continue
             if isinstance(event, bb.event.CacheLoadProgress):
+                if params.options.quiet > 1:
+                    continue
                 cacheprogress.update(event.current)
                 continue
             if isinstance(event, bb.event.CacheLoadCompleted):
+                if params.options.quiet > 1:
+                    continue
                 cacheprogress.finish()
-                if not params.options.quiet:
+                if params.options.quiet == 0:
                     print("Loaded %d entries from dependency cache." % event.num_entries)
                 continue
 
@@ -541,7 +568,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                 return_value = event.exitcode
                 if event.error:
                     errors = errors + 1
-                    logger.error("Command execution failed: %s", event.error)
+                    logger.error(str(event))
                 main.shutdown = 2
                 continue
             if isinstance(event, bb.command.CommandExit):
@@ -552,39 +579,16 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                 main.shutdown = 2
                 continue
             if isinstance(event, bb.event.MultipleProviders):
-                logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "",
-                            event._item,
-                            ", ".join(event._candidates))
-                rtime = ""
-                if event._is_runtime:
-                    rtime = "R"
-                logger.info("consider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, event._item))
+                logger.info(str(event))
                 continue
             if isinstance(event, bb.event.NoProvider):
-                if event._runtime:
-                    r = "R"
-                else:
-                    r = ""
-
-                extra = ''
-                if not event._reasons:
-                    if event._close_matches:
-                        extra = ". Close matches:\n  %s" % '\n  '.join(event._close_matches)
-
                 # For universe builds, only show these as warnings, not errors
-                h = logger.warning
                 if not universe:
                     return_value = 1
                     errors = errors + 1
-                    h = logger.error
-
-                if event._dependees:
-                    h("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s", r, event._item, ", ".join(event._dependees), r, extra)
+                    logger.error(str(event))
                 else:
-                    h("Nothing %sPROVIDES '%s'%s", r, event._item, extra)
-                if event._reasons:
-                    for reason in event._reasons:
-                        h("%s", reason)
+                    logger.warning(str(event))
                 continue
 
             if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
@@ -606,29 +610,33 @@ def main(server, eventHandler, params, tf = TerminalFilter):
             if isinstance(event, bb.runqueue.runQueueTaskFailed):
                 return_value = 1
                 taskfailures.append(event.taskstring)
-                logger.error("Task (%s) failed with exit code '%s'",
-                             event.taskstring, event.exitcode)
+                logger.error(str(event))
                 continue
 
             if isinstance(event, bb.runqueue.sceneQueueTaskFailed):
-                logger.warning("Setscene task (%s) failed with exit code '%s' - real task will be run instead",
-                               event.taskstring, event.exitcode)
+                logger.warning(str(event))
                 continue
 
             if isinstance(event, bb.event.DepTreeGenerated):
                 continue
 
             if isinstance(event, bb.event.ProcessStarted):
+                if params.options.quiet > 1:
+                    continue
                 parseprogress = new_progress(event.processname, event.total)
                 parseprogress.start(False)
                 continue
             if isinstance(event, bb.event.ProcessProgress):
+                if params.options.quiet > 1:
+                    continue
                 if parseprogress:
                     parseprogress.update(event.progress)
                 else:
                     bb.warn("Got ProcessProgress event for someting that never started?")
                 continue
             if isinstance(event, bb.event.ProcessFinished):
+                if params.options.quiet > 1:
+                    continue
                 if parseprogress:
                     parseprogress.finish()
                 parseprogress = None
@@ -639,6 +647,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                                   bb.event.MetadataEvent,
                                   bb.event.StampUpdate,
                                   bb.event.ConfigParsed,
+                                  bb.event.MultiConfigParsed,
                                   bb.event.RecipeParsed,
                                   bb.event.RecipePreFinalise,
                                   bb.runqueue.runQueueEvent,
@@ -646,6 +655,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                                   bb.event.OperationCompleted,
                                   bb.event.OperationProgress,
                                   bb.event.DiskFull,
+                                  bb.event.HeartbeatEvent,
                                   bb.build.TaskProgress)):
                 continue
 
@@ -699,7 +709,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
         if return_value and errors:
             summary += pluralise("\nSummary: There was %s ERROR message shown, returning a non-zero exit code.",
                                  "\nSummary: There were %s ERROR messages shown, returning a non-zero exit code.", errors)
-        if summary and not params.options.quiet:
+        if summary and params.options.quiet == 0:
             print(summary)
 
         if interrupted:

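The new print_event_log() above keeps only the last BBINCLUDELOGS_LINES lines of a failed task's log by appending to a list and popping its head. The same tail behaviour can be sketched with collections.deque, whose maxlen does the trimming implicitly (a swapped-in idiom, not what the function above uses):

from collections import deque

def tail_log(logfile, loglines):
    # Print the tail of a failed task's log, prefixed like the
    # knotty output above (illustrative sketch)
    with open(logfile, "r") as f:
        if loglines:
            # maxlen makes the deque drop old lines automatically
            for line in deque(f, maxlen=int(loglines)):
                print(' | %s' % line.rstrip())
        else:
            for line in f:
                print('| %s' % line.rstrip())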
+ 2 - 2
bitbake/lib/bb/ui/ncurses.py

@@ -297,7 +297,7 @@ class NCursesUI:
 #                            bb.error("log data follows (%s)" % logfile)
 #                            number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
 #                            if number_of_lines:
-#                                subprocess.call('tail -n%s %s' % (number_of_lines, logfile), shell=True)
+#                                subprocess.check_call('tail -n%s %s' % (number_of_lines, logfile), shell=True)
 #                            else:
 #                                f = open(logfile, "r")
 #                                while True:
@@ -315,7 +315,7 @@ class NCursesUI:
                     # also allow them to now exit with a single ^C
                     shutdown = 2
                 if isinstance(event, bb.command.CommandFailed):
-                    mw.appendText("Command execution failed: %s" % event.error)
+                    mw.appendText(str(event))
                     time.sleep(2)
                     exitflag = True
                 if isinstance(event, bb.command.CommandExit):

+ 21 - 27
bitbake/lib/bb/ui/depexp.py → bitbake/lib/bb/ui/taskexp.py

@@ -63,7 +63,9 @@ class PackageReverseDepView(Gtk.TreeView):
         self.current = None
         self.filter_model = model.filter_new()
         self.filter_model.set_visible_func(self._filter)
-        self.set_model(self.filter_model)
+        self.sort_model = self.filter_model.sort_new_with_model()
+        self.sort_model.set_sort_column_id(COL_DEP_PARENT, Gtk.SortType.ASCENDING)
+        self.set_model(self.sort_model)
         self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PARENT))
 
     def _filter(self, model, iter, data):
@@ -78,7 +80,7 @@ class PackageReverseDepView(Gtk.TreeView):
 class DepExplorer(Gtk.Window):
     def __init__(self):
         Gtk.Window.__init__(self)
-        self.set_title("Dependency Explorer")
+        self.set_title("Task Dependency Explorer")
         self.set_default_size(500, 500)
         self.connect("delete-event", Gtk.main_quit)
 
@@ -106,30 +108,21 @@ class DepExplorer(Gtk.Window):
 
         box = Gtk.VBox(homogeneous=True, spacing=4)
 
-        # Runtime Depends
+        # Task Depends
         scrolled = Gtk.ScrolledWindow()
         scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
         scrolled.set_shadow_type(Gtk.ShadowType.IN)
-        self.rdep_treeview = PackageDepView(self.depends_model, TYPE_RDEP, "Runtime Depends")
-        self.rdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
-        scrolled.add(self.rdep_treeview)
-        box.add(scrolled)
-
-        # Build Depends
-        scrolled = Gtk.ScrolledWindow()
-        scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
-        scrolled.set_shadow_type(Gtk.ShadowType.IN)
-        self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Build Depends")
+        self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Dependencies")
         self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
         scrolled.add(self.dep_treeview)
         box.add(scrolled)
         pane.add2(box)
 
-        # Reverse Depends
+        # Reverse Task Depends
         scrolled = Gtk.ScrolledWindow()
         scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
         scrolled.set_shadow_type(Gtk.ShadowType.IN)
-        self.revdep_treeview = PackageReverseDepView(self.depends_model, "Reverse Depends")
+        self.revdep_treeview = PackageReverseDepView(self.depends_model, "Dependent Tasks")
         self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT)
         scrolled.add(self.revdep_treeview)
         box.add(scrolled)
@@ -160,22 +153,15 @@ class DepExplorer(Gtk.Window):
             current_package = None
         else:
             current_package = model.get_value(it, COL_PKG_NAME)
-        self.rdep_treeview.set_current_package(current_package)
         self.dep_treeview.set_current_package(current_package)
         self.revdep_treeview.set_current_package(current_package)
 
 
     def parse(self, depgraph):
-        for package in depgraph["pn"]:
-            self.pkg_model.insert(0, (package,))
-
-        for package in depgraph["depends"]:
-            for depend in depgraph["depends"][package]:
-                self.depends_model.insert (0, (TYPE_DEP, package, depend))
-
-        for package in depgraph["rdepends-pn"]:
-            for rdepend in depgraph["rdepends-pn"][package]:
-                self.depends_model.insert (0, (TYPE_RDEP, package, rdepend))
+        for task in depgraph["tdepends"]:
+            self.pkg_model.insert(0, (task,))
+            for depend in depgraph["tdepends"][task]:
+                self.depends_model.insert (0, (TYPE_DEP, task, depend))
 
 
 class gtkthread(threading.Thread):
@@ -301,8 +287,16 @@ def main(server, eventHandler, params):
             if isinstance(event, bb.command.CommandCompleted):
                 continue
 
+            if isinstance(event, bb.event.NoProvider):
+                print(str(event))
+
+                _, error = server.runCommand(["stateShutdown"])
+                if error:
+                    print('Unable to cleanly shutdown: %s' % error)
+                break
+
             if isinstance(event, bb.command.CommandFailed):
-                print("Command execution failed: %s" % event.error)
+                print(str(event))
                 return event.exitcode
 
             if isinstance(event, bb.command.CommandExit):

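With the rename to taskexp.py above, DepExplorer.parse() consumes the depgraph's "tdepends" dictionary, which maps each task to the list of tasks it depends on. A sketch of the shape being iterated, with hypothetical task names:

# Hypothetical excerpt of the depgraph handed to DepExplorer.parse()
depgraph = {
    "tdepends": {
        "quilt-native.do_compile": ["quilt-native.do_configure"],
        "quilt-native.do_configure": ["quilt-native.do_patch"],
    }
}

# Mirror of the insertion loop above: one row per (task, dependency)
for task, depends in depgraph["tdepends"].items():
    for depend in depends:
        print("%s -> %s" % (task, depend))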
+ 11 - 24
bitbake/lib/bb/ui/toasterui.py

@@ -168,6 +168,9 @@ def main(server, eventHandler, params):
         logger.warning("buildhistory is not enabled. Please enable INHERIT += \"buildhistory\" to see image details.")
         build_history_enabled = False
 
+    if not "buildstats" in inheritlist.split(" "):
+        logger.warning("buildstats is not enabled. Please enable INHERIT += \"buildstats\" to generate build statistics.")
+
     if not params.observe_only:
         params.updateFromServer(server)
         params.updateToServer(server, os.environ.copy())
@@ -233,6 +236,9 @@ def main(server, eventHandler, params):
             # pylint: disable=protected-access
             # the code will look into the protected variables of the event; no easy way around this
 
+            if isinstance(event, bb.event.HeartbeatEvent):
+                continue
+
             if isinstance(event, bb.event.ParseStarted):
                 if not (build_log and build_log_file_path):
                     build_log, build_log_file_path = _open_build_log(log_dir)
@@ -314,29 +320,13 @@ def main(server, eventHandler, params):
             if isinstance(event, bb.event.CacheLoadCompleted):
                 continue
             if isinstance(event, bb.event.MultipleProviders):
-                logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "",
-                            event._item,
-                            ", ".join(event._candidates))
-                logger.info("consider defining a PREFERRED_PROVIDER entry to match %s", event._item)
+                logger.info(str(event))
                 continue
 
             if isinstance(event, bb.event.NoProvider):
                 errors = errors + 1
-                if event._runtime:
-                    r = "R"
-                else:
-                    r = ""
-
-                if event._dependees:
-                    text = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)" % (r, event._item, ", ".join(event._dependees), r)
-                else:
-                    text = "Nothing %sPROVIDES '%s'" % (r, event._item)
-
+                text = str(event)
                 logger.error(text)
-                if event._reasons:
-                    for reason in event._reasons:
-                        logger.error("%s", reason)
-                        text += reason
                 buildinfohelper.store_log_error(text)
                 continue
 
@@ -358,8 +348,7 @@ def main(server, eventHandler, params):
             if isinstance(event, bb.runqueue.runQueueTaskFailed):
                 buildinfohelper.update_and_store_task(event)
                 taskfailures.append(event.taskstring)
-                logger.error("Task (%s) failed with exit code '%s'",
-                             event.taskstring, event.exitcode)
+                logger.error(str(event))
                 continue
 
             if isinstance(event, (bb.runqueue.sceneQueueTaskCompleted, bb.runqueue.sceneQueueTaskFailed)):
@@ -376,7 +365,7 @@ def main(server, eventHandler, params):
                 if isinstance(event, bb.command.CommandFailed):
                     errors += 1
                     errorcode = 1
-                    logger.error("Command execution failed: %s", event.error)
+                    logger.error(str(event))
                 elif isinstance(event, bb.event.BuildCompleted):
                     buildinfohelper.scan_image_artifacts()
                     buildinfohelper.clone_required_sdk_artifacts()
@@ -432,9 +421,7 @@ def main(server, eventHandler, params):
                 elif event.type == "SetBRBE":
                     buildinfohelper.brbe = buildinfohelper._get_data_from_event(event)
                 elif event.type == "TaskArtifacts":
-                    # not implemented yet
-                    # see https://bugzilla.yoctoproject.org/show_bug.cgi?id=10283 for details
-                    pass
+                    buildinfohelper.scan_task_artifacts(event)
                 elif event.type == "OSErrorException":
                     logger.error(event)
                 else:

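The replacements above all follow one pattern: message formatting moves into the event classes themselves, so a UI logs str(event) instead of rebuilding text from protected fields, and the periodic HeartbeatEvents are dropped outright. A minimal sketch of the resulting handler shape (handle_event is illustrative; the full loop is in bitbake/lib/bb/ui/toasterui.py):

import bb.event

def handle_event(event, logger):
    # Heartbeats fire periodically whether or not anything changed;
    # a logging UI can simply drop them.
    if isinstance(event, bb.event.HeartbeatEvent):
        return
    # Event classes now carry their own __str__ formatting, so the UI
    # only decides the log level, not the wording.
    if isinstance(event, bb.event.NoProvider):
        logger.error(str(event))
    elif isinstance(event, bb.event.MultipleProviders):
        logger.info(str(event))
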
+ 7 - 1
bitbake/lib/bb/ui/uihelper.py

@@ -32,7 +32,10 @@ class BBUIHelper:
 
     def eventHandler(self, event):
         if isinstance(event, bb.build.TaskStarted):
-            self.running_tasks[event.pid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time() }
+            if event._mc != "default":
+                self.running_tasks[event.pid] = { 'title' : "mc:%s:%s %s" % (event._mc, event._package, event._task), 'starttime' : time.time() }
+            else:
+                self.running_tasks[event.pid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time() }
             self.running_pids.append(event.pid)
             self.needUpdate = True
         elif isinstance(event, bb.build.TaskSucceeded):
@@ -58,6 +61,9 @@ class BBUIHelper:
                 self.running_tasks[event.pid]['progress'] = event.progress
                 self.running_tasks[event.pid]['rate'] = event.rate
                 self.needUpdate = True
+        else:
+            return False
+        return True
 
     def getTasks(self):
         self.needUpdate = False

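With this change eventHandler() reports whether it consumed the event, and tasks from a non-default multiconfig get an "mc:<name>:" prefix in their title. A sketch of how a front-end might use the new return value (dispatch is a hypothetical caller; BBUIHelper is the class above):

from bb.ui.uihelper import BBUIHelper

helper = BBUIHelper()

def dispatch(event):
    if helper.eventHandler(event):
        # A task or progress event: the helper has recorded it in
        # running_tasks and the status display just needs a refresh.
        return
    # Anything else still needs handling by the UI's own event loop.
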
+ 74 - 41
bitbake/lib/bb/utils.py

@@ -378,7 +378,7 @@ def _print_exception(t, value, tb, realfile, text, context):
 
         # If the exception is from spawning a task, let's be helpful and display
         # the output (which hopefully includes stderr).
-        if isinstance(value, subprocess.CalledProcessError):
+        if isinstance(value, subprocess.CalledProcessError) and value.output:
             error.append("Subprocess output:")
             error.append(value.output.decode("utf-8", errors="ignore"))
     finally:
@@ -523,12 +523,8 @@ def md5_file(filename):
     """
     Return the hex string representation of the MD5 checksum of filename.
     """
-    try:
-        import hashlib
-        m = hashlib.md5()
-    except ImportError:
-        import md5
-        m = md5.new()
+    import hashlib
+    m = hashlib.md5()
 
     with open(filename, "rb") as f:
         for line in f:
@@ -538,14 +534,9 @@ def md5_file(filename):
 def sha256_file(filename):
     """
     Return the hex string representation of the 256-bit SHA checksum of
-    filename.  On Python 2.4 this will return None, so callers will need to
-    handle that by either skipping SHA checks, or running a standalone sha256sum
-    binary.
+    filename.
     """
-    try:
-        import hashlib
-    except ImportError:
-        return None
+    import hashlib
 
     s = hashlib.sha256()
     with open(filename, "rb") as f:
@@ -557,10 +548,7 @@ def sha1_file(filename):
     """
     Return the hex string representation of the SHA1 checksum of the filename
     """
-    try:
-        import hashlib
-    except ImportError:
-        return None
+    import hashlib
 
     s = hashlib.sha1()
     with open(filename, "rb") as f:
@@ -665,7 +653,7 @@ def build_environment(d):
     for var in bb.data.keys(d):
         export = d.getVarFlag(var, "export", False)
         if export:
-            os.environ[var] = d.getVar(var, True) or ""
+            os.environ[var] = d.getVar(var) or ""
 
 def _check_unsafe_delete_path(path):
     """
@@ -692,7 +680,7 @@ def remove(path, recurse=False):
             if _check_unsafe_delete_path(path):
                 raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path)
         # shutil.rmtree(name) would be ideal but it's too slow
-        subprocess.call(['rm', '-rf'] + glob.glob(path))
+        subprocess.check_call(['rm', '-rf'] + glob.glob(path))
         return
     for name in glob.glob(path):
         try:
@@ -783,13 +771,14 @@ def movefile(src, dest, newmtime = None, sstat = None):
             return None
 
     renamefailed = 1
+    # os.rename needs the dest path to end with the file name,
+    # so append the file name to the path only if dest is a directory
+    srcfname = os.path.basename(src)
+    destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \
+                else dest
+
     if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
         try:
-            # os.rename needs to know the dest path ending with file name
-            # so append the file name to a path only if it's a dir specified
-            srcfname = os.path.basename(src)
-            destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \
-                        else dest
             os.rename(src, destpath)
             renamefailed = 0
         except Exception as e:
@@ -803,8 +792,8 @@ def movefile(src, dest, newmtime = None, sstat = None):
         didcopy = 0
         if stat.S_ISREG(sstat[stat.ST_MODE]):
             try: # For safety copy then move it over.
-                shutil.copyfile(src, dest + "#new")
-                os.rename(dest + "#new", dest)
+                shutil.copyfile(src, destpath + "#new")
+                os.rename(destpath + "#new", destpath)
                 didcopy = 1
             except Exception as e:
                 print('movefile: copy', src, '->', dest, 'failed.', e)
@@ -825,9 +814,9 @@ def movefile(src, dest, newmtime = None, sstat = None):
             return None
 
     if newmtime:
-        os.utime(dest, (newmtime, newmtime))
+        os.utime(destpath, (newmtime, newmtime))
     else:
-        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+        os.utime(destpath, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
         newmtime = sstat[stat.ST_MTIME]
     return newmtime
 
@@ -911,11 +900,20 @@ def copyfile(src, dest, newmtime = None, sstat = None):
         newmtime = sstat[stat.ST_MTIME]
     return newmtime
 
-def which(path, item, direction = 0, history = False):
+def which(path, item, direction = 0, history = False, executable=False):
     """
-    Locate a file in a PATH
+    Locate `item` in the list of paths `path` (a colon-separated string like $PATH).
+    If `direction` is non-zero then the list is reversed.
+    If `history` is True then the list of candidates is also returned as result,history.
+    If `executable` is True then the candidate has to be an executable file,
+    otherwise the candidate simply has to exist.
     """
 
+    if executable:
+        is_candidate = lambda p: os.path.isfile(p) and os.access(p, os.X_OK)
+    else:
+        is_candidate = lambda p: os.path.exists(p)
+
     hist = []
     paths = (path or "").split(':')
     if direction != 0:
@@ -924,7 +922,7 @@ def which(path, item, direction = 0, history = False):
     for p in paths:
         next = os.path.join(p, item)
         hist.append(next)
-        if os.path.exists(next):
+        if is_candidate(next):
             if not os.path.isabs(next):
                 next = os.path.abspath(next)
             if history:
@@ -953,7 +951,7 @@ def contains(variable, checkvalues, truevalue, falsevalue, d):
     Arguments:
 
     variable -- the variable name. This will be fetched and expanded (using
-    d.getVar(variable, True)) and then split into a set().
+    d.getVar(variable)) and then split into a set().
 
     checkvalues -- if this is a string it is split on whitespace into a set(),
     otherwise coerced directly into a set().
@@ -966,7 +964,7 @@ def contains(variable, checkvalues, truevalue, falsevalue, d):
     d -- the data store.
     """
 
-    val = d.getVar(variable, True)
+    val = d.getVar(variable)
     if not val:
         return falsevalue
     val = set(val.split())
@@ -979,7 +977,7 @@ def contains(variable, checkvalues, truevalue, falsevalue, d):
     return falsevalue
 
 def contains_any(variable, checkvalues, truevalue, falsevalue, d):
-    val = d.getVar(variable, True)
+    val = d.getVar(variable)
     if not val:
         return falsevalue
     val = set(val.split())
@@ -991,6 +989,30 @@ def contains_any(variable, checkvalues, truevalue, falsevalue, d):
         return truevalue
     return falsevalue
 
+def filter(variable, checkvalues, d):
+    """Return all words in the variable that are present in the checkvalues.
+
+    Arguments:
+
+    variable -- the variable name. This will be fetched and expanded (using
+    d.getVar(variable)) and then split into a set().
+
+    checkvalues -- if this is a string it is split on whitespace into a set(),
+    otherwise coerced directly into a set().
+
+    d -- the data store.
+    """
+
+    val = d.getVar(variable)
+    if not val:
+        return ''
+    val = set(val.split())
+    if isinstance(checkvalues, str):
+        checkvalues = set(checkvalues.split())
+    else:
+        checkvalues = set(checkvalues)
+    return ' '.join(sorted(checkvalues & val))
+
 def cpu_count():
     return multiprocessing.cpu_count()
 
@@ -1378,10 +1400,10 @@ def edit_bblayers_conf(bblayers_conf, add, remove):
 
 def get_file_layer(filename, d):
     """Determine the collection (as defined by a layer's layer.conf file) containing the specified file"""
-    collections = (d.getVar('BBFILE_COLLECTIONS', True) or '').split()
+    collections = (d.getVar('BBFILE_COLLECTIONS') or '').split()
     collection_res = {}
     for collection in collections:
-        collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection, True) or ''
+        collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection) or ''
 
     def path_to_layer(path):
         # Use longest path so we handle nested layers
@@ -1394,7 +1416,7 @@ def get_file_layer(filename, d):
         return match
 
     result = None
-    bbfiles = (d.getVar('BBFILES', True) or '').split()
+    bbfiles = (d.getVar('BBFILES') or '').split()
     bbfilesmatch = False
     for bbfilesentry in bbfiles:
         if fnmatch.fnmatch(filename, bbfilesentry):
@@ -1471,7 +1493,7 @@ def export_proxies(d):
         if v in os.environ.keys():
             exported = True
         else:
-            v_proxy = d.getVar(v, True)
+            v_proxy = d.getVar(v)
             if v_proxy is not None:
                 os.environ[v] = v_proxy
                 exported = True
@@ -1481,7 +1503,7 @@ def export_proxies(d):
 
 def load_plugins(logger, plugins, pluginpath):
     def load_plugin(name):
-        logger.debug('Loading plugin %s' % name)
+        logger.debug(1, 'Loading plugin %s' % name)
         fp, pathname, description = imp.find_module(name, [pluginpath])
         try:
             return imp.load_module(name, fp, pathname, description)
@@ -1489,7 +1511,7 @@ def load_plugins(logger, plugins, pluginpath):
             if fp:
                 fp.close()
 
-    logger.debug('Loading plugins from %s...' % pluginpath)
+    logger.debug(1, 'Loading plugins from %s...' % pluginpath)
 
     expanded = (glob.glob(os.path.join(pluginpath, '*' + ext))
                 for ext in python_extensions)
@@ -1503,3 +1525,14 @@ def load_plugins(logger, plugins, pluginpath):
                 plugins.append(obj or plugin)
             else:
                 plugins.append(plugin)
+
+
+class LogCatcher(logging.Handler):
+    """Logging handler for collecting logged messages so you can check them later"""
+    def __init__(self):
+        self.messages = []
+        logging.Handler.__init__(self, logging.WARNING)
+    def emit(self, record):
+        self.messages.append(bb.build.logformatter.format(record))
+    def contains(self, message):
+        return (message in self.messages)

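A usage sketch for two of the utils additions above. FakeData is a stand-in for a real datastore (only getVar() is exercised), and the PACKAGECONFIG value is invented for illustration:

import bb.utils

class FakeData:
    """Stand-in for a BitBake datastore; only getVar() is needed here."""
    def getVar(self, var):
        return "systemd x11 wayland" if var == "PACKAGECONFIG" else None

d = FakeData()

# filter() returns the words of the variable that appear in checkvalues,
# sorted and space-joined:
enabled = bb.utils.filter("PACKAGECONFIG", "x11 opengl wayland", d)
# -> "wayland x11"

# which() can now insist on an executable file rather than bare existence:
cc = bb.utils.which("/usr/local/bin:/usr/bin:/bin", "gcc", executable=True)
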
+ 47 - 24
bitbake/lib/bblayers/action.py

@@ -1,7 +1,9 @@
 import fnmatch
 import logging
 import os
+import shutil
 import sys
+import tempfile
 
 import bb.utils
 
@@ -16,41 +18,62 @@ def plugin_init(plugins):
 
 class ActionPlugin(LayerPlugin):
     def do_add_layer(self, args):
-        """Add a layer to bblayers.conf."""
-        layerdir = os.path.abspath(args.layerdir)
-        if not os.path.exists(layerdir):
-            sys.stderr.write("Specified layer directory doesn't exist\n")
-            return 1
+        """Add one or more layers to bblayers.conf."""
+        layerdirs = [os.path.abspath(ldir) for ldir in args.layerdir]
 
-        layer_conf = os.path.join(layerdir, 'conf', 'layer.conf')
-        if not os.path.exists(layer_conf):
-            sys.stderr.write("Specified layer directory doesn't contain a conf/layer.conf file\n")
-            return 1
+        for layerdir in layerdirs:
+            if not os.path.exists(layerdir):
+                sys.stderr.write("Specified layer directory %s doesn't exist\n" % layerdir)
+                return 1
+
+            layer_conf = os.path.join(layerdir, 'conf', 'layer.conf')
+            if not os.path.exists(layer_conf):
+                sys.stderr.write("Specified layer directory %s doesn't contain a conf/layer.conf file\n" % layerdir)
+                return 1
 
         bblayers_conf = os.path.join('conf', 'bblayers.conf')
         if not os.path.exists(bblayers_conf):
             sys.stderr.write("Unable to find bblayers.conf\n")
             return 1
 
-        notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdir, None)
-        if notadded:
-            for item in notadded:
-                sys.stderr.write("Specified layer %s is already in BBLAYERS\n" % item)
+        # Back up bblayers.conf to tempdir before we add layers
+        tempdir = tempfile.mkdtemp()
+        backup = tempdir + "/bblayers.conf.bak"
+        shutil.copy2(bblayers_conf, backup)
+
+        try:
+            notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdirs, None)
+            if not (args.force or notadded):
+                try:
+                    self.tinfoil.parseRecipes()
+                except bb.tinfoil.TinfoilUIException:
+                    # Restore the backup copy of bblayers.conf
+                    shutil.copy2(backup, bblayers_conf)
+                    bb.fatal("Parse failure with the specified layer added")
+                else:
+                    for item in notadded:
+                        sys.stderr.write("Specified layer %s is already in BBLAYERS\n" % item)
+        finally:
+            # Remove the backup copy of bblayers.conf
+            shutil.rmtree(tempdir)
 
     def do_remove_layer(self, args):
-        """Remove a layer from bblayers.conf."""
+        """Remove one or more layers from bblayers.conf."""
         bblayers_conf = os.path.join('conf', 'bblayers.conf')
         if not os.path.exists(bblayers_conf):
             sys.stderr.write("Unable to find bblayers.conf\n")
             return 1
 
-        if args.layerdir.startswith('*'):
-            layerdir = args.layerdir
-        elif not '/' in args.layerdir:
-            layerdir = '*/%s' % args.layerdir
-        else:
-            layerdir = os.path.abspath(args.layerdir)
-        (_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdir)
+        layerdirs = []
+        for item in args.layerdir:
+            if item.startswith('*'):
+                layerdir = item
+            elif not '/' in item:
+                layerdir = '*/%s' % item
+            else:
+                layerdir = os.path.abspath(item)
+            layerdirs.append(layerdir)
+        (_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdirs)
         if notremoved:
             for item in notremoved:
                 sys.stderr.write("No layers matching %s found in BBLAYERS\n" % item)
@@ -180,7 +203,7 @@ build results (as the layer priority order has effectively changed).
 
         if first_regex:
             # Find the BBFILES entries that match (which will have come from this conf/layer.conf file)
-            bbfiles = str(self.tinfoil.config_data.getVar('BBFILES', True)).split()
+            bbfiles = str(self.tinfoil.config_data.getVar('BBFILES')).split()
             bbfiles_layer = []
             for item in bbfiles:
                 if first_regex.match(item):
@@ -222,10 +245,10 @@ build results (as the layer priority order has effectively changed).
 
     def register_commands(self, sp):
         parser_add_layer = self.add_command(sp, 'add-layer', self.do_add_layer, parserecipes=False)
-        parser_add_layer.add_argument('layerdir', help='Layer directory to add')
+        parser_add_layer.add_argument('layerdir', nargs='+', help='Layer directory/directories to add')
 
         parser_remove_layer = self.add_command(sp, 'remove-layer', self.do_remove_layer, parserecipes=False)
-        parser_remove_layer.add_argument('layerdir', help='Layer directory to remove (wildcards allowed, enclose in quotes to avoid shell expansion)')
+        parser_remove_layer.add_argument('layerdir', nargs='+', help='Layer directory/directories to remove (wildcards allowed, enclose in quotes to avoid shell expansion)')
         parser_remove_layer.set_defaults(func=self.do_remove_layer)
 
         parser_flatten = self.add_command(sp, 'flatten', self.do_flatten)

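The add-layer flow above is a backup/validate/rollback pattern: copy bblayers.conf aside, apply the edit, attempt a full parse, restore the copy if the parse fails, and delete the backup in every case. Reduced to its essentials (edit and validate are caller-supplied stand-ins; the real code restores only on bb.tinfoil.TinfoilUIException):

import shutil
import tempfile

def edit_with_rollback(conf_path, edit, validate):
    """edit and validate are callables; validate raises on failure."""
    tempdir = tempfile.mkdtemp()
    backup = tempdir + "/bblayers.conf.bak"
    shutil.copy2(conf_path, backup)
    try:
        edit(conf_path)
        try:
            validate(conf_path)
        except Exception:
            shutil.copy2(backup, conf_path)   # roll back the edit
            raise
    finally:
        shutil.rmtree(tempdir)                # backup no longer needed
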
+ 1 - 1
bitbake/lib/bblayers/common.py

@@ -12,7 +12,7 @@ class LayerPlugin():
 
     def tinfoil_init(self, tinfoil):
         self.tinfoil = tinfoil
-        self.bblayers = (self.tinfoil.config_data.getVar('BBLAYERS', True) or "").split()
+        self.bblayers = (self.tinfoil.config_data.getVar('BBLAYERS') or "").split()
         layerconfs = self.tinfoil.config_data.varhistory.get_variable_items_files('BBFILE_COLLECTIONS', self.tinfoil.config_data)
         self.bbfile_collections = {layer: os.path.dirname(os.path.dirname(path)) for layer, path in layerconfs.items()}
 

+ 6 - 5
bitbake/lib/bblayers/layerindex.py

@@ -56,7 +56,7 @@ class LayerIndexPlugin(ActionPlugin):
         r = conn.getresponse()
         if r.status != 200:
             raise Exception("Failed to read " + path + ": %d %s" % (r.status, r.reason))
-        return json.loads(r.read())
+        return json.loads(r.read().decode())
 
     def get_layer_deps(self, layername, layeritems, layerbranches, layerdependencies, branchnum, selfname=False):
         def layeritems_info_id(items_name, layeritems):
@@ -151,7 +151,7 @@ class LayerIndexPlugin(ActionPlugin):
     def do_layerindex_fetch(self, args):
         """Fetches a layer from a layer index along with its dependent layers, and adds them to conf/bblayers.conf.
 """
-        apiurl = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_URL', True)
+        apiurl = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_URL')
         if not apiurl:
             logger.error("Cannot get BBLAYERS_LAYERINDEX_URL")
             return 1
@@ -173,8 +173,8 @@ class LayerIndexPlugin(ActionPlugin):
             return 1
 
         ignore_layers = []
-        for collection in self.tinfoil.config_data.getVar('BBFILE_COLLECTIONS', True).split():
-            lname = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_NAME_%s' % collection, True)
+        for collection in self.tinfoil.config_data.getVar('BBFILE_COLLECTIONS').split():
+            lname = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_NAME_%s' % collection)
             if lname:
                 ignore_layers.append(lname)
 
@@ -225,7 +225,7 @@ class LayerIndexPlugin(ActionPlugin):
             printedlayers.append(dependency)
 
         if repourls:
-            fetchdir = self.tinfoil.config_data.getVar('BBLAYERS_FETCH_DIR', True)
+            fetchdir = self.tinfoil.config_data.getVar('BBLAYERS_FETCH_DIR')
             if not fetchdir:
                 logger.error("Cannot get BBLAYERS_FETCH_DIR")
                 return 1
@@ -247,6 +247,7 @@ class LayerIndexPlugin(ActionPlugin):
                         logger.plain("Adding layer \"%s\" to conf/bblayers.conf" % name)
                     localargs = argparse.Namespace()
                     localargs.layerdir = layerdir
+                    localargs.force = args.force
                     self.do_add_layer(localargs)
                 else:
                     break

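The added .decode() matters on Python 3: http.client responses read() as bytes, and json.loads() did not accept bytes until Python 3.6. A minimal illustration:

import json

raw = b'{"layers": []}'           # what conn.getresponse().read() yields
data = json.loads(raw.decode())   # decode (UTF-8 by default) before parsing
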
+ 35 - 34
bitbake/lib/bblayers/query.py

@@ -5,8 +5,6 @@ import sys
 import os
 import re
 
-import bb.cache
-import bb.providers
 import bb.utils
 
 from bblayers.common import LayerPlugin
@@ -62,7 +60,7 @@ are overlayed will also be listed, with a " (skipped)" suffix.
         # factor - however, each layer.conf is free to either prepend or append to
         # BBPATH (or indeed do crazy stuff with it). Thus the order in BBPATH might
         # not be exactly the order present in bblayers.conf either.
-        bbpath = str(self.tinfoil.config_data.getVar('BBPATH', True))
+        bbpath = str(self.tinfoil.config_data.getVar('BBPATH'))
         overlayed_class_found = False
         for (classfile, classdirs) in classes.items():
             if len(classdirs) > 1:
@@ -114,7 +112,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
 
     def list_recipes(self, title, pnspec, show_overlayed_only, show_same_ver_only, show_filenames, show_multi_provider_only, inherits):
         if inherits:
-            bbpath = str(self.tinfoil.config_data.getVar('BBPATH', True))
+            bbpath = str(self.tinfoil.config_data.getVar('BBPATH'))
             for classname in inherits:
                 classfile = 'classes/%s.bbclass' % classname
                 if not bb.utils.which(bbpath, classfile, history=False):
@@ -122,15 +120,13 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
                     sys.exit(1)
 
         pkg_pn = self.tinfoil.cooker.recipecaches[''].pkg_pn
-        (latest_versions, preferred_versions) = bb.providers.findProviders(self.tinfoil.config_data, self.tinfoil.cooker.recipecaches[''], pkg_pn)
-        allproviders = bb.providers.allProviders(self.tinfoil.cooker.recipecaches[''])
+        (latest_versions, preferred_versions) = self.tinfoil.find_providers()
+        allproviders = self.tinfoil.get_all_providers()
 
         # Ensure we list skipped recipes
         # We are largely guessing about PN, PV and the preferred version here,
         # but we have no choice since skipped recipes are not fully parsed
         skiplist = list(self.tinfoil.cooker.skiplist.keys())
-        skiplist.sort( key=lambda fileitem: self.tinfoil.cooker.collection.calc_bbfile_priority(fileitem) )
-        skiplist.reverse()
         for fn in skiplist:
             recipe_parts = os.path.splitext(os.path.basename(fn))[0].split('_')
             p = recipe_parts[0]
@@ -158,14 +154,19 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
                     logger.plain("%s:", pn)
                 logger.plain("  %s %s%s", layer.ljust(20), ver, skipped)
 
-        global_inherit = (self.tinfoil.config_data.getVar('INHERIT', True) or "").split()
+        global_inherit = (self.tinfoil.config_data.getVar('INHERIT') or "").split()
         cls_re = re.compile('classes/')
 
         preffiles = []
         items_listed = False
         for p in sorted(pkg_pn):
             if pnspec:
-                if not fnmatch.fnmatch(p, pnspec):
+                found=False
+                for pnm in pnspec:
+                    if fnmatch.fnmatch(p, pnm):
+                        found=True
+                        break
+                if not found:
                     continue
 
             if len(allproviders[p]) > 1 or not show_multi_provider_only:
@@ -246,17 +247,28 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
 
 Lists recipes with the bbappends that apply to them as subitems.
 """
-
-        logger.plain('=== Appended recipes ===')
+        if args.pnspec:
+            logger.plain('=== Matched appended recipes ===')
+        else:
+            logger.plain('=== Appended recipes ===')
 
         pnlist = list(self.tinfoil.cooker_data.pkg_pn.keys())
         pnlist.sort()
         appends = False
         for pn in pnlist:
+            if args.pnspec:
+                found=False
+                for pnm in args.pnspec:
+                    if fnmatch.fnmatch(pn, pnm):
+                        found=True
+                        break
+                if not found:
+                    continue
+
             if self.show_appends_for_pn(pn):
                 appends = True
 
-        if self.show_appends_for_skipped():
+        if not args.pnspec and self.show_appends_for_skipped():
             appends = True
 
         if not appends:
@@ -265,10 +277,7 @@ Lists recipes with the bbappends that apply to them as subitems.
     def show_appends_for_pn(self, pn):
         filenames = self.tinfoil.cooker_data.pkg_pn[pn]
 
-        best = bb.providers.findBestProvider(pn,
-                                             self.tinfoil.config_data,
-                                             self.tinfoil.cooker_data,
-                                             self.tinfoil.cooker_data.pkg_pn)
+        best = self.tinfoil.find_best_provider(pn)
         best_filename = os.path.basename(best[3])
 
         return self.show_appends_output(filenames, best_filename)
@@ -319,12 +328,12 @@ NOTE: .bbappend files can impact the dependencies.
         ignore_layers = (args.ignore or '').split(',')
 
         pkg_fn = self.tinfoil.cooker_data.pkg_fn
-        bbpath = str(self.tinfoil.config_data.getVar('BBPATH', True))
+        bbpath = str(self.tinfoil.config_data.getVar('BBPATH'))
         self.require_re = re.compile(r"require\s+(.+)")
         self.include_re = re.compile(r"include\s+(.+)")
         self.inherit_re = re.compile(r"inherit\s+(.+)")
 
-        global_inherit = (self.tinfoil.config_data.getVar('INHERIT', True) or "").split()
+        global_inherit = (self.tinfoil.config_data.getVar('INHERIT') or "").split()
 
         # The bb's DEPENDS and RDEPENDS
         for f in pkg_fn:
@@ -336,10 +345,7 @@ NOTE: .bbappend files can impact the dependencies.
             deps = self.tinfoil.cooker_data.deps[f]
             for pn in deps:
                 if pn in self.tinfoil.cooker_data.pkg_pn:
-                    best = bb.providers.findBestProvider(pn,
-                            self.tinfoil.config_data,
-                            self.tinfoil.cooker_data,
-                            self.tinfoil.cooker_data.pkg_pn)
+                    best = self.tinfoil.find_best_provider(pn)
                     self.check_cross_depends("DEPENDS", layername, f, best[3], args.filenames, ignore_layers)
 
             # The RDEPENDS
@@ -352,14 +358,11 @@ NOTE: .bbappend files can impact the dependencies.
                     sorted_rdeps[k2] = 1
             all_rdeps = sorted_rdeps.keys()
             for rdep in all_rdeps:
-                all_p = bb.providers.getRuntimeProviders(self.tinfoil.cooker_data, rdep)
+                all_p, best = self.tinfoil.get_runtime_providers(rdep)
                 if all_p:
                     if f in all_p:
                         # The recipe provides this one itself, ignore
                         continue
-                    best = bb.providers.filterProvidersRunTime(all_p, rdep,
-                                    self.tinfoil.config_data,
-                                    self.tinfoil.cooker_data)[0][0]
                     self.check_cross_depends("RDEPENDS", layername, f, best, args.filenames, ignore_layers)
 
             # The RRECOMMENDS
@@ -372,14 +375,11 @@ NOTE: .bbappend files can impact the dependencies.
                     sorted_rrecs[k2] = 1
             all_rrecs = sorted_rrecs.keys()
             for rrec in all_rrecs:
-                all_p = bb.providers.getRuntimeProviders(self.tinfoil.cooker_data, rrec)
+                all_p, best = self.tinfoil.get_runtime_providers(rrec)
                 if all_p:
                     if f in all_p:
                         # The recipe provides this one itself, ignore
                         continue
-                    best = bb.providers.filterProvidersRunTime(all_p, rrec,
-                                    self.tinfoil.config_data,
-                                    self.tinfoil.cooker_data)[0][0]
                     self.check_cross_depends("RRECOMMENDS", layername, f, best, args.filenames, ignore_layers)
 
             # The inherit class
@@ -490,10 +490,11 @@ NOTE: .bbappend files can impact the dependencies.
         parser_show_recipes = self.add_command(sp, 'show-recipes', self.do_show_recipes)
         parser_show_recipes.add_argument('-f', '--filenames', help='instead of the default formatting, list filenames of higher priority recipes with the ones they overlay indented underneath', action='store_true')
         parser_show_recipes.add_argument('-m', '--multiple', help='only list where multiple recipes (in the same layer or different layers) exist for the same recipe name', action='store_true')
-        parser_show_recipes.add_argument('-i', '--inherits', help='only list recipes that inherit the named class', metavar='CLASS', default='')
-        parser_show_recipes.add_argument('pnspec', nargs='?', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
+        parser_show_recipes.add_argument('-i', '--inherits', help='only list recipes that inherit the named class(es) - separate multiple classes using , (without spaces)', metavar='CLASS', default='')
+        parser_show_recipes.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
 
-        self.add_command(sp, 'show-appends', self.do_show_appends)
+        parser_show_appends = self.add_command(sp, 'show-appends', self.do_show_appends)
+        parser_show_appends.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
 
         parser_show_cross_depends = self.add_command(sp, 'show-cross-depends', self.do_show_cross_depends)
         parser_show_cross_depends.add_argument('-f', '--filenames', help='show full file path', action='store_true')

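Both pnspec loops above implement the same check: does the recipe name match any of the supplied wildcard patterns? An equivalent sketch using any():

import fnmatch

def matches_any(pn, pnspec):
    return any(fnmatch.fnmatch(pn, pnm) for pnm in pnspec)

matches_any("linux-yocto", ["linux-*", "gcc"])  # True
matches_any("busybox", ["linux-*", "gcc"])      # False
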
+ 12 - 5
bitbake/lib/bs4/builder/_html5lib.py

@@ -15,7 +15,14 @@ from bs4.element import (
     whitespace_re,
 )
 import html5lib
+try:
+    # html5lib >= 0.99999999/1.0b9
+    from html5lib.treebuilders import base as treebuildersbase
+except ImportError:
+    # html5lib <= 0.9999999/1.0b8
+    from html5lib.treebuilders import _base as treebuildersbase
 from html5lib.constants import namespaces
+
 from bs4.element import (
     Comment,
     Doctype,
@@ -67,7 +74,7 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
         return '<html><head></head><body>%s</body></html>' % fragment
 
 
-class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
+class TreeBuilderForHtml5lib(treebuildersbase.TreeBuilder):
 
     def __init__(self, soup, namespaceHTMLElements):
         self.soup = soup
@@ -105,7 +112,7 @@ class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
         return self.soup
 
     def getFragment(self):
-        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
+        return treebuildersbase.TreeBuilder.getFragment(self).element
 
 class AttrList(object):
     def __init__(self, element):
@@ -137,9 +144,9 @@ class AttrList(object):
         return name in list(self.attrs.keys())
 
 
-class Element(html5lib.treebuilders._base.Node):
+class Element(treebuildersbase.Node):
     def __init__(self, element, soup, namespace):
-        html5lib.treebuilders._base.Node.__init__(self, element.name)
+        treebuildersbase.Node.__init__(self, element.name)
         self.element = element
         self.soup = soup
         self.namespace = namespace
@@ -324,7 +331,7 @@ class Element(html5lib.treebuilders._base.Node):
 
 class TextNode(Element):
     def __init__(self, element, soup):
-        html5lib.treebuilders._base.Node.__init__(self, None)
+        treebuildersbase.Node.__init__(self, None)
         self.element = element
         self.soup = soup
 

+ 56 - 23
bitbake/lib/prserv/serv.py

@@ -5,16 +5,12 @@ import threading
 import queue
 import socket
 import io
-
-try:
-    import sqlite3
-except ImportError:
-    from pysqlite2 import dbapi2 as sqlite3
-
-import bb.server.xmlrpc
+import sqlite3
+import bb.server.xmlrpcclient
 import prserv
 import prserv.db
 import errno
+import select
 
 logger = logging.getLogger("BitBake.PRserv")
 
@@ -64,6 +60,8 @@ class PRServer(SimpleXMLRPCServer):
         self.register_function(self.importone, "importone")
         self.register_introspection_functions()
 
+        self.quitpipein, self.quitpipeout = os.pipe()
+
         self.requestqueue = queue.Queue()
         self.handlerthread = threading.Thread(target = self.process_request_thread)
         self.handlerthread.daemon = False
@@ -80,12 +78,14 @@ class PRServer(SimpleXMLRPCServer):
 
         bb.utils.set_process_name("PRServ Handler")
 
-        while not self.quit:
+        while not self.quitflag:
             try:
                 (request, client_address) = self.requestqueue.get(True, 30)
             except queue.Empty:
                 self.table.sync_if_dirty()
                 continue
+            if request is None:
+                continue
             try:
                 self.finish_request(request, client_address)
                 self.shutdown_request(request)
@@ -105,7 +105,8 @@ class PRServer(SimpleXMLRPCServer):
     def sigterm_handler(self, signum, stack):
         if self.table:
             self.table.sync()
-        self.quit=True
+        self.quit()
+        self.requestqueue.put((None, None))
 
     def process_request(self, request, client_address):
         self.requestqueue.put((request, client_address))
@@ -141,7 +142,7 @@ class PRServer(SimpleXMLRPCServer):
         return self.table.importone(version, pkgarch, checksum, value)
 
     def ping(self):
-        return not self.quit
+        return not self.quitflag
 
     def getinfo(self):
         return (self.host, self.port)
@@ -157,12 +158,17 @@ class PRServer(SimpleXMLRPCServer):
             return None
 
     def quit(self):
-        self.quit=True
+        self.quitflag=True
+        os.write(self.quitpipeout, b"q")
+        os.close(self.quitpipeout)
         return
 
     def work_forever(self,):
-        self.quit = False
-        self.timeout = 0.5
+        self.quitflag = False
+        # This timeout applies to the poll in TCPServer; we need the select
+        # below to wake when our quit pipe closes. We only ever call into
+        # handle_request if there is data there.
+        self.timeout = 0.01
 
         bb.utils.set_process_name("PRServ")
 
@@ -174,12 +180,17 @@ class PRServer(SimpleXMLRPCServer):
                      (self.dbfile, self.host, self.port, str(os.getpid())))
 
         self.handlerthread.start()
-        while not self.quit:
-            self.handle_request()
+        while not self.quitflag:
+            ready = select.select([self.fileno(), self.quitpipein], [], [], 30)
+            if self.quitflag:
+                break
+            if self.fileno() in ready[0]:
+                self.handle_request()
         self.handlerthread.join()
         self.db.disconnect()
         logger.info("PRServer: stopping...")
         self.server_close()
+        os.close(self.quitpipein)
         return
 
     def start(self):
@@ -187,6 +198,7 @@ class PRServer(SimpleXMLRPCServer):
             pid = self.daemonize()
         else:
             pid = self.fork()
+            self.pid = pid
 
         # Ensure both the parent sees this and the child from the work_forever log entry above
         logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
@@ -242,12 +254,25 @@ class PRServer(SimpleXMLRPCServer):
 
         sys.stdout.flush()
         sys.stderr.flush()
+
+        # We could be called from a Python thread with io.StringIO as
+        # stdout/stderr, or it could be 'real' unix fd forking where we need
+        # to physically close the fds to prevent the program that launched us
+        # from potentially hanging on a pipe. Handle both cases.
         si = open('/dev/null', 'r')
+        try:
+            os.dup2(si.fileno(),sys.stdin.fileno())
+        except (AttributeError, io.UnsupportedOperation):
+            sys.stdin = si
         so = open(self.logfile, 'a+')
-        se = so
-        os.dup2(si.fileno(),sys.stdin.fileno())
-        os.dup2(so.fileno(),sys.stdout.fileno())
-        os.dup2(se.fileno(),sys.stderr.fileno())
+        try:
+            os.dup2(so.fileno(),sys.stdout.fileno())
+        except (AttributeError, io.UnsupportedOperation):
+            sys.stdout = so
+        try:
+            os.dup2(so.fileno(),sys.stderr.fileno())
+        except (AttributeError, io.UnsupportedOperation):
+            sys.stderr = so
 
         # Clear out all log handlers prior to the fork() to avoid calling
         # event handlers not part of the PRserver
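
The stdout/stderr redirection above prefers dup2() onto the real file descriptors, falling back to rebinding the sys-level stream when the current stream has no usable fd (io.StringIO under a thread raises io.UnsupportedOperation; objects without fileno() raise AttributeError). Isolated as a helper (redirect_stream_to is a hypothetical name):

import io
import os
import sys

def redirect_stream_to(stream_name, fileobj):
    """Point sys.<stream_name> at fileobj, via dup2 when possible."""
    stream = getattr(sys, stream_name)
    try:
        os.dup2(fileobj.fileno(), stream.fileno())
    except (AttributeError, io.UnsupportedOperation):
        setattr(sys, stream_name, fileobj)
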
@@ -292,7 +317,7 @@ class PRServerConnection(object):
             host, port = singleton.getinfo()
         self.host = host
         self.port = port
-        self.connection, self.transport = bb.server.xmlrpc._create_server(self.host, self.port)
+        self.connection, self.transport = bb.server.xmlrpcclient._create_server(self.host, self.port)
 
     def terminate(self):
         try:
@@ -420,7 +445,10 @@ class PRServiceConfigError(Exception):
 def auto_start(d):
     global singleton
 
-    host_params = list(filter(None, (d.getVar('PRSERV_HOST', True) or '').split(':')))
+    # Shutdown any existing PR Server
+    auto_shutdown()
+
+    host_params = list(filter(None, (d.getVar('PRSERV_HOST') or '').split(':')))
     if not host_params:
         return None
 
@@ -431,7 +459,7 @@ def auto_start(d):
 
     if is_local_special(host_params[0], int(host_params[1])) and not singleton:
         import bb.utils
-        cachedir = (d.getVar("PERSISTENT_DIR", True) or d.getVar("CACHE", True))
+        cachedir = (d.getVar("PERSISTENT_DIR") or d.getVar("CACHE"))
         if not cachedir:
             logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
             raise PRServiceConfigError
@@ -456,7 +484,7 @@ def auto_start(d):
         logger.critical("PRservice %s:%d not available" % (host, port))
         raise PRServiceConfigError
 
-def auto_shutdown(d=None):
+def auto_shutdown():
     global singleton
     if singleton:
         host, port = singleton.getinfo()
@@ -464,6 +492,11 @@ def auto_shutdown(d=None):
             PRServerConnection(host, port).terminate()
         except:
             logger.critical("Stop PRService %s:%d failed" % (host,port))
+
+        try:
+            os.waitpid(singleton.prserv.pid, 0)
+        except ChildProcessError:
+            pass
         singleton = None
 
 def ping(host, port):

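work_forever() above replaces the old poll-every-0.5s loop with a self-pipe: select() watches both the server socket and the read end of a pipe, and quit() writes a byte so the blocking select wakes immediately instead of waiting out its timeout. The trick in isolation (serve and handle_request are illustrative names):

import os
import select

quit_r, quit_w = os.pipe()

def request_quit():
    os.write(quit_w, b"q")   # wakes the select() below at once
    os.close(quit_w)

def serve(server_fd, handle_request):
    while True:
        ready, _, _ = select.select([server_fd, quit_r], [], [], 30)
        if quit_r in ready:
            break            # request_quit() was called
        if server_fd in ready:
            handle_request() # only called when data is waiting
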
+ 22 - 0
bitbake/lib/simplediff/LICENSE

@@ -0,0 +1,22 @@
+Copyright (c) 2008 - 2013 Paul Butler and contributors
+
+This software may be used under a zlib/libpng-style license:
+
+This software is provided 'as-is', without any express or implied warranty. In
+no event will the authors be held liable for any damages arising from the use
+of this software.
+
+Permission is granted to anyone to use this software for any purpose, including
+commercial applications, and to alter it and redistribute it freely, subject to
+the following restrictions:
+
+1. The origin of this software must not be misrepresented; you must not claim
+that you wrote the original software. If you use this software in a product, an
+acknowledgment in the product documentation would be appreciated but is not
+required.
+
+2. Altered source versions must be plainly marked as such, and must not be
+misrepresented as being the original software.
+
+3. This notice may not be removed or altered from any source distribution.
+

+ 198 - 0
bitbake/lib/simplediff/__init__.py

@@ -0,0 +1,198 @@
+'''
+Simple Diff for Python version 1.0
+
+Annotate two versions of a list with the values that have been
+changed between the versions, similar to unix's `diff` but with
+a dead-simple Python interface.
+
+(C) Paul Butler 2008-2012 <http://www.paulbutler.org/>
+May be used and distributed under the zlib/libpng license
+<http://www.opensource.org/licenses/zlib-license.php>
+'''
+
+__all__ = ['diff', 'string_diff', 'html_diff']
+__version__ = '1.0'
+
+
+def diff(old, new):
+    '''
+    Find the differences between two lists. Returns a list of pairs, where the
+    first value is in ['+','-','='] and represents an insertion, deletion, or
+    no change for that list. The second value of the pair is the list
+    of elements.
+
+    Params:
+        old     the old list of immutable, comparable values (ie. a list
+                of strings)
+        new     the new list of immutable, comparable values
+   
+    Returns:
+        A list of pairs, with the first part of the pair being one of three
+        strings ('-', '+', '=') and the second part being a list of values from
+        the original old and/or new lists. The first part of the pair
+        corresponds to whether the list of values is a deletion, insertion, or
+        unchanged, respectively.
+
+    Examples:
+        >>> diff([1,2,3,4],[1,3,4])
+        [('=', [1]), ('-', [2]), ('=', [3, 4])]
+
+        >>> diff([1,2,3,4],[2,3,4,1])
+        [('-', [1]), ('=', [2, 3, 4]), ('+', [1])]
+
+        >>> diff('The quick brown fox jumps over the lazy dog'.split(),
+        ...      'The slow blue cheese drips over the lazy carrot'.split())
+        ... # doctest: +NORMALIZE_WHITESPACE
+        [('=', ['The']),
+         ('-', ['quick', 'brown', 'fox', 'jumps']),
+         ('+', ['slow', 'blue', 'cheese', 'drips']),
+         ('=', ['over', 'the', 'lazy']),
+         ('-', ['dog']),
+         ('+', ['carrot'])]
+
+    '''
+
+    # Create a map from old values to their indices
+    old_index_map = dict()
+    for i, val in enumerate(old):
+        old_index_map.setdefault(val,list()).append(i)
+
+    # Find the largest substring common to old and new.
+    # We use a dynamic programming approach here.
+    # 
+    # We iterate over each value in the `new` list, calling the
+    # index `inew`. At each iteration, `overlap[i]` is the
+    # length of the largest suffix of `old[:i]` equal to a suffix
+    # of `new[:inew]` (or unset when `old[i]` != `new[inew]`).
+    #
+    # At each stage of iteration, the new `overlap` (called
+    # `_overlap` until the original `overlap` is no longer needed)
+    # is built from the old one.
+    #
+    # If the length of overlap exceeds the largest substring
+    # seen so far (`sub_length`), we update the largest substring
+    # to the overlapping strings.
+
+    overlap = dict()
+    # `sub_start_old` is the index of the beginning of the largest overlapping
+    # substring in the old list. `sub_start_new` is the index of the beginning
+    # of the same substring in the new list. `sub_length` is the length that
+    # overlaps in both.
+    # These track the largest overlapping substring seen so far, so naturally
+    # we start with a 0-length substring.
+    sub_start_old = 0
+    sub_start_new = 0
+    sub_length = 0
+
+    for inew, val in enumerate(new):
+        _overlap = dict()
+        for iold in old_index_map.get(val,list()):
+            # now we are considering all values of iold such that
+            # `old[iold] == new[inew]`.
+            _overlap[iold] = (iold and overlap.get(iold - 1, 0)) + 1
+            if(_overlap[iold] > sub_length):
+                # this is the largest substring seen so far, so store its
+                # indices
+                sub_length = _overlap[iold]
+                sub_start_old = iold - sub_length + 1
+                sub_start_new = inew - sub_length + 1
+        overlap = _overlap
+
+    if sub_length == 0:
+        # If no common substring is found, we return an insert and delete...
+        return (old and [('-', old)] or []) + (new and [('+', new)] or [])
+    else:
+        # ...otherwise, the common substring is unchanged and we recursively
+        # diff the text before and after that substring
+        return diff(old[ : sub_start_old], new[ : sub_start_new]) + \
+               [('=', new[sub_start_new : sub_start_new + sub_length])] + \
+               diff(old[sub_start_old + sub_length : ],
+                       new[sub_start_new + sub_length : ])
+
+
+def string_diff(old, new):
+    '''
+    Returns the difference between the old and new strings when split on
+    whitespace. Considers punctuation a part of the word
+
+    This function is intended as an example; you'll probably want
+    a more sophisticated wrapper in practice.
+
+    Params:
+        old     the old string
+        new     the new string
+
+    Returns:
+        the output of `diff` on the two strings after splitting them
+        on whitespace (a list of change instructions; see the docstring
+        of `diff`)
+
+    Examples:
+        >>> string_diff('The quick brown fox', 'The fast blue fox')
+        ... # doctest: +NORMALIZE_WHITESPACE
+        [('=', ['The']),
+         ('-', ['quick', 'brown']),
+         ('+', ['fast', 'blue']),
+         ('=', ['fox'])]
+
+    '''
+    return diff(old.split(), new.split())
+
+
+def html_diff(old, new):
+    '''
+    Returns the difference between two strings (as in string_diff) in
+    HTML format. HTML code in the strings is NOT escaped, so you
+    will get weird results if the strings contain HTML.
+
+    This function is intended as an example; you'll probably want
+    a more sophisticated wrapper in practice.
+
+    Params:
+        old     the old string
+        new     the new string
+
+    Returns:
+        the output of the diff expressed with HTML <ins> and <del>
+        tags.
+
+    Examples:
+        >>> html_diff('The quick brown fox', 'The fast blue fox')
+        'The <del>quick brown</del> <ins>fast blue</ins> fox'
+    '''
+    con = {'=': (lambda x: x),
+           '+': (lambda x: "<ins>" + x + "</ins>"),
+           '-': (lambda x: "<del>" + x + "</del>")}
+    return " ".join([(con[a])(" ".join(b)) for a, b in string_diff(old, new)])
+
+
+def check_diff(old, new):
+    '''
+    This tests that diffs returned by `diff` are valid. You probably won't
+    want to use this function, but it's provided for documentation and
+    testing.
+
+    A diff should satisfy the property that the old input is equal to the
+    elements of the result annotated with '-' or '=' concatenated together.
+    Likewise, the new input is equal to the elements of the result annotated
+    with '+' or '=' concatenated together. This function compares `old`,
+    `new`, and the results of `diff(old, new)` to ensure this is true.
+
+    Tests:
+        >>> check_diff('ABCBA', 'CBABA')
+        >>> check_diff('Foobarbaz', 'Foobarbaz')
+        >>> check_diff('Foobarbaz', 'Boobazbam')
+        >>> check_diff('The quick brown fox', 'Some quick brown car')
+        >>> check_diff('A thick red book', 'A quick blue book')
+        >>> check_diff('dafhjkdashfkhasfjsdafdasfsda', 'asdfaskjfhksahkfjsdha')
+        >>> check_diff('88288822828828288282828', '88288882882828282882828')
+        >>> check_diff('1234567890', '24689')
+    '''
+    old = list(old)
+    new = list(new)
+    result = diff(old, new)
+    _old = [val for (a, vals) in result if (a in '=-') for val in vals]
+    assert old == _old, 'Expected %s, got %s' % (old, _old)
+    _new = [val for (a, vals) in result if (a in '=+') for val in vals]
+    assert new == _new, 'Expected %s, got %s' % (new, _new)
+

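For a concrete trace of the algorithm above: diffing ["a", "b", "c"] against ["b", "c"], the overlap map grows to length 2 on the common run ["b", "c"], so that run is emitted as '=' and the leftover ["a"] recurses into a plain deletion. Assuming bitbake/lib is on sys.path:

from simplediff import diff

print(diff(["a", "b", "c"], ["b", "c"]))
# [('-', ['a']), ('=', ['b', 'c'])]
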
+ 7 - 5
bitbake/lib/toaster/bldcollector/urls.py

@@ -1,7 +1,7 @@
 #
 # BitBake Toaster Implementation
 #
-# Copyright (C) 2014        Intel Corporation
+# Copyright (C) 2014-2017   Intel Corporation
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 as
@@ -17,9 +17,11 @@
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
 
-from django.conf.urls import patterns, include, url
+from django.conf.urls import include, url
 
-urlpatterns = patterns('bldcollector.views',
+import bldcollector.views
+
+urlpatterns = [
+        # landing point for pushing a bitbake_eventlog.json file to this toaster instance
-        url(r'^eventfile$', 'eventfile', name='eventfile'),
-        )
+        url(r'^eventfile$', bldcollector.views.eventfile, name='eventfile'),
+]

+ 2 - 2
bitbake/lib/toaster/bldcontrol/bbcontroller.py

@@ -37,8 +37,8 @@ class BitbakeController(object):
     """
 
     def __init__(self, be):
-        import bb.server.xmlrpc
-        self.connection = bb.server.xmlrpc._create_server(be.bbaddress,
+        import bb.server.xmlrpcclient
+        self.connection = bb.server.xmlrpcclient._create_server(be.bbaddress,
                                                           int(be.bbport))[0]
 
     def _runCommand(self, command):

+ 143 - 93
bitbake/lib/toaster/bldcontrol/localhostbecontroller.py

@@ -24,10 +24,11 @@ import os
 import sys
 import re
 import shutil
+import time
 from django.db import transaction
 from django.db.models import Q
 from bldcontrol.models import BuildEnvironment, BRLayer, BRVariable, BRTarget, BRBitbake
-from orm.models import CustomImageRecipe, Layer, Layer_Version, ProjectLayer
+from orm.models import CustomImageRecipe, Layer, Layer_Version, ProjectLayer, ToasterSetting
 import subprocess
 
 from toastermain import settings
@@ -51,12 +52,14 @@ class LocalhostBEController(BuildEnvironmentController):
         self.pokydirname = None
         self.islayerset = False
 
-    def _shellcmd(self, command, cwd=None, nowait=False):
+    def _shellcmd(self, command, cwd=None, nowait=False,env=None):
         if cwd is None:
             cwd = self.be.sourcedir
+        if env is None:
+            env=os.environ.copy()
 
-        logger.debug("lbc_shellcmmd: (%s) %s" % (cwd, command))
-        p = subprocess.Popen(command, cwd = cwd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        logger.debug("lbc_shellcmd: (%s) %s" % (cwd, command))
+        p = subprocess.Popen(command, cwd = cwd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
         if nowait:
             return
         (out,err) = p.communicate()
@@ -75,7 +78,7 @@ class LocalhostBEController(BuildEnvironmentController):
     def getGitCloneDirectory(self, url, branch):
         """Construct unique clone directory name out of url and branch."""
         if branch != "HEAD":
-            return "_toaster_clones/_%s_%s" % (re.sub('[:/@%]', '_', url), branch)
+            return "_toaster_clones/_%s_%s" % (re.sub('[:/@+%]', '_', url), branch)
 
         # word of attention; this is a localhost-specific issue; only on the localhost we expect to have "HEAD" releases
         # which _ALWAYS_ means the current poky checkout
@@ -85,6 +88,11 @@ class LocalhostBEController(BuildEnvironmentController):
         return local_checkout_path
 
 
+    def setCloneStatus(self,bitbake,status,total,current):
+        bitbake.req.build.repos_cloned=current
+        bitbake.req.build.repos_to_clone=total
+        bitbake.req.build.save()
+
     def setLayers(self, bitbake, layers, targets):
         """ a word of attention: by convention, the first layer for any build will be poky! """
 
@@ -92,6 +100,8 @@ class LocalhostBEController(BuildEnvironmentController):
 
         layerlist = []
         nongitlayerlist = []
+        git_env = os.environ.copy()
+        # (note: add custom environment settings here)
 
         # set layers in the layersource
 
@@ -132,7 +142,7 @@ class LocalhostBEController(BuildEnvironmentController):
         cached_layers = {}
 
         try:
-            for remotes in self._shellcmd("git remote -v", self.be.sourcedir).split("\n"):
+            for remotes in self._shellcmd("git remote -v", self.be.sourcedir,env=git_env).split("\n"):
                 try:
                     remote = remotes.split("\t")[1].split(" ")[0]
                     if remote not in cached_layers:
@@ -147,7 +157,13 @@ class LocalhostBEController(BuildEnvironmentController):
         logger.info("Using pre-checked out source for layer %s", cached_layers)
 
         # 3. checkout the repositories
+        clone_count=0
+        clone_total=len(gitrepos.keys())
+        self.setCloneStatus(bitbake,'Started',clone_total,clone_count)
         for giturl, commit in gitrepos.keys():
+            self.setCloneStatus(bitbake,'progress',clone_total,clone_count)
+            clone_count += 1
+
             localdirname = os.path.join(self.be.sourcedir, self.getGitCloneDirectory(giturl, commit))
             logger.debug("localhostbecontroller: giturl %s:%s checking out in current directory %s" % (giturl, commit, localdirname))
 
@@ -155,7 +171,7 @@ class LocalhostBEController(BuildEnvironmentController):
             if os.path.exists(localdirname):
                 try:
                     localremotes = self._shellcmd("git remote -v",
-                                                  localdirname)
+                                                  localdirname,env=git_env)
                     if not giturl in localremotes and commit != 'HEAD':
                         raise BuildSetupException("Existing git repository at %s, but with different remotes ('%s', expected '%s'). Toaster will not continue out of fear of damaging something." % (localdirname, ", ".join(localremotes.split("\n")), giturl))
                 except ShellCmdException:
@@ -165,18 +181,18 @@ class LocalhostBEController(BuildEnvironmentController):
             else:
                 if giturl in cached_layers:
                     logger.debug("localhostbecontroller git-copying %s to %s" % (cached_layers[giturl], localdirname))
-                    self._shellcmd("git clone \"%s\" \"%s\"" % (cached_layers[giturl], localdirname))
-                    self._shellcmd("git remote remove origin", localdirname)
-                    self._shellcmd("git remote add origin \"%s\"" % giturl, localdirname)
+                    self._shellcmd("git clone \"%s\" \"%s\"" % (cached_layers[giturl], localdirname),env=git_env)
+                    self._shellcmd("git remote remove origin", localdirname,env=git_env)
+                    self._shellcmd("git remote add origin \"%s\"" % giturl, localdirname,env=git_env)
                 else:
                     logger.debug("localhostbecontroller: cloning %s in %s" % (giturl, localdirname))
-                    self._shellcmd('git clone "%s" "%s"' % (giturl, localdirname))
+                    self._shellcmd('git clone "%s" "%s"' % (giturl, localdirname),env=git_env)
 
             # branch magic name "HEAD" will inhibit checkout
             if commit != "HEAD":
                 logger.debug("localhostbecontroller: checking out commit %s to %s " % (commit, localdirname))
                 ref = commit if re.match('^[a-fA-F0-9]+$', commit) else 'origin/%s' % commit
-                self._shellcmd('git fetch --all && git reset --hard "%s"' % ref, localdirname)
+                self._shellcmd('git fetch --all && git reset --hard "%s"' % ref, localdirname,env=git_env)
 
             # take the localdirname as poky dir if we can find the oe-init-build-env
             if self.pokydirname is None and os.path.exists(os.path.join(localdirname, "oe-init-build-env")):
@@ -186,7 +202,7 @@ class LocalhostBEController(BuildEnvironmentController):
                 # make sure we have a working bitbake
                 if not os.path.exists(os.path.join(self.pokydirname, 'bitbake')):
                     logger.debug("localhostbecontroller: checking bitbake into the poky dirname %s " % self.pokydirname)
-                    self._shellcmd("git clone -b \"%s\" \"%s\" \"%s\" " % (bitbake.commit, bitbake.giturl, os.path.join(self.pokydirname, 'bitbake')))
+                    self._shellcmd("git clone -b \"%s\" \"%s\" \"%s\" " % (bitbake.commit, bitbake.giturl, os.path.join(self.pokydirname, 'bitbake')),env=git_env)
 
             # verify our repositories
             for name, dirpath in gitrepos[(giturl, commit)]:
@@ -198,74 +214,96 @@ class LocalhostBEController(BuildEnvironmentController):
                 if name != "bitbake":
                     layerlist.append(localdirpath.rstrip("/"))
 
+        self.setCloneStatus(bitbake,'complete',clone_total,clone_count)
         logger.debug("localhostbecontroller: current layer list %s " % pformat(layerlist))
 
+        if self.pokydirname is None and os.path.exists(os.path.join(self.be.sourcedir, "oe-init-build-env")):
+            logger.debug("localhostbecontroller: selected poky dir name %s" % self.be.sourcedir)
+            self.pokydirname = self.be.sourcedir
+
         # 5. create custom layer and add custom recipes to it
-        layerpath = os.path.join(self.be.builddir,
-                                 CustomImageRecipe.LAYER_NAME)
         for target in targets:
             try:
-                customrecipe = CustomImageRecipe.objects.get(name=target.target,
-                                                             project=bitbake.req.project)
+                customrecipe = CustomImageRecipe.objects.get(
+                    name=target.target,
+                    project=bitbake.req.project)
+
+                custom_layer_path = self.setup_custom_image_recipe(
+                    customrecipe, layers)
+
+                if os.path.isdir(custom_layer_path):
+                    layerlist.append(custom_layer_path)
+
             except CustomImageRecipe.DoesNotExist:
-                continue # not a custom recipe, skip
-
-            # create directory structure
-            for name in ("conf", "recipes"):
-                path = os.path.join(layerpath, name)
-                if not os.path.isdir(path):
-                    os.makedirs(path)
-
-            # create layer.conf
-            config = os.path.join(layerpath, "conf", "layer.conf")
-            if not os.path.isfile(config):
-                with open(config, "w") as conf:
-                    conf.write('BBPATH .= ":${LAYERDIR}"\nBBFILES += "${LAYERDIR}/recipes/*.bb"\n')
-
-            # Update the dirpath of the Layer_Version that contains our
-            # base_recipe, so we can read the base recipe and then generate
-            # the custom recipe.
-            br_layer_base_recipe = layers.get(
-                layer_version=customrecipe.base_recipe.layer_version)
-
-            br_layer_base_dirpath = \
-                    os.path.join(self.be.sourcedir,
-                                 self.getGitCloneDirectory(
-                                     br_layer_base_recipe.giturl,
-                                     br_layer_base_recipe.commit),
-                                 customrecipe.base_recipe.layer_version.dirpath
-                                )
-
-            customrecipe.base_recipe.layer_version.dirpath = \
-                         br_layer_base_dirpath
-
-            customrecipe.base_recipe.layer_version.save()
-
-            # create recipe
-            recipe_path = \
-                    os.path.join(layerpath, "recipes", "%s.bb" % target.target)
-            with open(recipe_path, "w") as recipef:
-                recipef.write(customrecipe.generate_recipe_file_contents())
-
-            # Update the layer and recipe objects
-            customrecipe.layer_version.dirpath = layerpath
-            customrecipe.layer_version.save()
-
-            customrecipe.file_path = recipe_path
-            customrecipe.save()
-
-            # create *Layer* objects needed for build machinery to work
-            BRLayer.objects.get_or_create(req=target.req,
-                                          name=layer.name,
-                                          dirpath=layerpath,
-                                          giturl="file://%s" % layerpath)
-        if os.path.isdir(layerpath):
-            layerlist.append(layerpath)
+                continue  # not a custom recipe, skip
 
-        self.islayerset = True
         layerlist.extend(nongitlayerlist)
+        logger.debug("\n\nset layers gives this list %s" % pformat(layerlist))
+        self.islayerset = True
         return layerlist
 
+    def setup_custom_image_recipe(self, customrecipe, layers):
+        """ Set up toaster-custom-images layer and recipe files """
+        layerpath = os.path.join(self.be.builddir,
+                                 CustomImageRecipe.LAYER_NAME)
+
+        # create directory structure
+        for name in ("conf", "recipes"):
+            path = os.path.join(layerpath, name)
+            if not os.path.isdir(path):
+                os.makedirs(path)
+
+        # create layer.conf
+        config = os.path.join(layerpath, "conf", "layer.conf")
+        if not os.path.isfile(config):
+            with open(config, "w") as conf:
+                conf.write('BBPATH .= ":${LAYERDIR}"\nBBFILES += "${LAYERDIR}/recipes/*.bb"\n')
+
+        # Update the dirpath of the Layer_Version that contains our
+        # base_recipe, so we can read the base recipe and then generate
+        # the custom recipe.
+        br_layer_base_recipe = layers.get(
+            layer_version=customrecipe.base_recipe.layer_version)
+
+        # If the layer is one that we've cloned we know where it lives
+        if br_layer_base_recipe.giturl and br_layer_base_recipe.commit:
+            layer_path = self.getGitCloneDirectory(
+                br_layer_base_recipe.giturl,
+                br_layer_base_recipe.commit)
+        # Otherwise it's a local layer
+        elif br_layer_base_recipe.local_source_dir:
+            layer_path = br_layer_base_recipe.local_source_dir
+        else:
+            logger.error("Unable to workout the dir path for the custom"
+                         " image recipe")
+
+        br_layer_base_dirpath = os.path.join(
+            self.be.sourcedir,
+            layer_path,
+            customrecipe.base_recipe.layer_version.dirpath)
+
+        customrecipe.base_recipe.layer_version.dirpath = br_layer_base_dirpath
+
+        customrecipe.base_recipe.layer_version.save()
+
+        # create recipe
+        recipe_path = os.path.join(layerpath, "recipes", "%s.bb" %
+                                   customrecipe.name)
+        with open(recipe_path, "w") as recipef:
+            recipef.write(customrecipe.generate_recipe_file_contents())
+
+        # Update the layer and recipe objects
+        customrecipe.layer_version.dirpath = layerpath
+        customrecipe.layer_version.layer.local_source_dir = layerpath
+        customrecipe.layer_version.layer.save()
+        customrecipe.layer_version.save()
+
+        customrecipe.file_path = recipe_path
+        customrecipe.save()
+
+        return layerpath
+
+
     def readServerLogFile(self):
         return open(os.path.join(self.be.builddir, "toaster_server.log"), "r").read()
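
The new setup_custom_image_recipe() helper concentrates the per-target work that setLayers() used to do inline: it materialises a throwaway layer under the build directory and writes one generated recipe into it. Given the toaster-custom-images layer name from CustomImageRecipe.LAYER_NAME, the resulting on-disk layout is roughly (a sketch, with <name> standing for the custom recipe's name):

    <builddir>/toaster-custom-images/
        conf/layer.conf      # BBPATH .= ":${LAYERDIR}", BBFILES += "${LAYERDIR}/recipes/*.bb"
        recipes/<name>.bb    # written from customrecipe.generate_recipe_file_contents()
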
 
@@ -277,23 +315,17 @@ class LocalhostBEController(BuildEnvironmentController):
         builddir = '%s-toaster-%d' % (self.be.builddir, bitbake.req.project.id)
         oe_init = os.path.join(self.pokydirname, 'oe-init-build-env')
         # init build environment
-        self._shellcmd("bash -c 'source %s %s'" % (oe_init, builddir),
+        try:
+            custom_script = ToasterSetting.objects.get(name="CUSTOM_BUILD_INIT_SCRIPT").value
+            custom_script = custom_script.replace("%BUILDDIR%", builddir)
+            self._shellcmd("bash -c 'source %s'" % (custom_script))
+        except ToasterSetting.DoesNotExist:
+            self._shellcmd("bash -c 'source %s %s'" % (oe_init, builddir),
                        self.be.sourcedir)
 
         # update bblayers.conf
-        bblconfpath = os.path.join(builddir, "conf/bblayers.conf")
-        conflines = open(bblconfpath, "r").readlines()
-        skip = False
+        bblconfpath = os.path.join(builddir, "conf/toaster-bblayers.conf")
         with open(bblconfpath, 'w') as bblayers:
-            for line in conflines:
-                if line.startswith("# line added by toaster"):
-                    skip = True
-                    continue
-                if skip:
-                    skip = False
-                else:
-                    bblayers.write(line)
-
             bblayers.write('# line added by toaster build control\n'
                            'BBLAYERS = "%s"' % ' '.join(layers))
 
@@ -304,15 +336,32 @@ class LocalhostBEController(BuildEnvironmentController):
                 conf.write('%s="%s"\n' % (var.name, var.value))
             conf.write('INHERIT+="toaster buildhistory"')
 
+        # clean the environment Toaster hands to the build
+        env_clean = 'unset BBPATH;' # clean BBPATH for <= YP-2.4.0
+
         # run bitbake server from the clone
         bitbake = os.path.join(self.pokydirname, 'bitbake', 'bin', 'bitbake')
-        self._shellcmd('bash -c \"source %s %s; BITBAKE_UI="knotty" %s --read %s '
-                       '--server-only -t xmlrpc -B 0.0.0.0:0\"' % (oe_init,
-                       builddir, bitbake, confpath), self.be.sourcedir)
+        toasterlayers = os.path.join(builddir,"conf/toaster-bblayers.conf")
+        self._shellcmd('%s bash -c \"source %s %s; BITBAKE_UI="knotty" %s --read %s --read %s '
+                       '--server-only -B 0.0.0.0:0\"' % (env_clean, oe_init,
+                       builddir, bitbake, confpath, toasterlayers), self.be.sourcedir)
 
         # read port number from bitbake.lock
-        self.be.bbport = ""
+        self.be.bbport = -1
         bblock = os.path.join(builddir, 'bitbake.lock')
+        # allow 10 seconds for the bitbake lock file to appear and to be populated
+        for lock_check in range(10):
+            if not os.path.exists(bblock):
+                logger.debug("localhostbecontroller: waiting for bblock file to appear")
+                time.sleep(1)
+                continue
+            if 10 < os.stat(bblock).st_size:
+                break
+            logger.debug("localhostbecontroller: waiting for bblock content to appear")
+            time.sleep(1)
+        else:
+            raise BuildSetupException("Cannot find bitbake server lock file '%s'. Aborting." % bblock)
+
         with open(bblock) as fplock:
             for line in fplock:
                 if ":" in line:
@@ -320,7 +369,7 @@ class LocalhostBEController(BuildEnvironmentController):
                     logger.debug("localhostbecontroller: bitbake port %s", self.be.bbport)
                     break
 
-        if not self.be.bbport:
+        if -1 == self.be.bbport:
             raise BuildSetupException("localhostbecontroller: can't read bitbake port from %s" % bblock)
 
         self.be.bbaddress = "localhost"
@@ -341,10 +390,11 @@ class LocalhostBEController(BuildEnvironmentController):
         log = os.path.join(builddir, 'toaster_ui.log')
         local_bitbake = os.path.join(os.path.dirname(os.getenv('BBBASEDIR')),
                                      'bitbake')
-        self._shellcmd(['bash -c \"(TOASTER_BRBE="%s" BBSERVER="0.0.0.0:-1" '
-                        '%s %s -u toasterui --token="" >>%s 2>&1;'
-                        'BITBAKE_UI="knotty" BBSERVER=0.0.0.0:-1 %s -m)&\"' \
-                        % (brbe, local_bitbake, bbtargets, log, bitbake)],
+        self._shellcmd(['%s bash -c \"(TOASTER_BRBE="%s" BBSERVER="0.0.0.0:%s" '
+                        '%s %s -u toasterui  --read %s --read %s --token="" >>%s 2>&1;'
+                        'BITBAKE_UI="knotty" BBSERVER=0.0.0.0:%s %s -m)&\"' \
+                        % (env_clean, brbe, self.be.bbport, local_bitbake, bbtargets, confpath, toasterlayers, log,
+                        self.be.bbport, bitbake,)],
                         builddir, nowait=True)
 
         logger.debug('localhostbecontroller: Build launched, exiting. '
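
Because the server is started with -B 0.0.0.0:0, the kernel picks a free port and the controller discovers it by polling bitbake.lock, which the server populates once it is listening. The wait-then-parse logic above, reduced to a standalone sketch (the ten-second budget and the >10-byte size check mirror the code; the lock file is assumed to contain a host:port line):

    import os
    import time

    def read_bitbake_port(bblock, timeout=10):
        # wait for the lock file to both appear and be populated
        for _ in range(timeout):
            if os.path.exists(bblock) and os.stat(bblock).st_size > 10:
                break
            time.sleep(1)
        else:
            raise RuntimeError("no populated lock file at '%s'" % bblock)
        # the server writes something like 'localhost:45123' into the file
        with open(bblock) as fplock:
            for line in fplock:
                if ":" in line:
                    return int(line.split(":")[-1].strip())
        return -1
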

+ 7 - 4
bitbake/lib/toaster/bldcontrol/management/commands/checksettings.py

@@ -1,4 +1,4 @@
-from django.core.management.base import NoArgsCommand, CommandError
+from django.core.management.base import BaseCommand, CommandError
 from django.db import transaction
 
 from django.core.management import call_command
@@ -18,7 +18,7 @@ def DN(path):
         return os.path.dirname(path)
 
 
-class Command(NoArgsCommand):
+class Command(BaseCommand):
     args = ""
     help = "Verifies that the configured settings are valid and usable, or prompts the user to fix the settings."
 
@@ -75,7 +75,10 @@ class Command(NoArgsCommand):
                         call_command("loaddata", "settings")
                         template_conf = os.environ.get("TEMPLATECONF", "")
 
-                        if "poky" in template_conf:
+                        if ToasterSetting.objects.filter(name='CUSTOM_XML_ONLY').count() > 0:
+                            # only use the custom settings
+                            pass
+                        elif "poky" in template_conf:
                             print("Loading poky configuration")
                             call_command("loaddata", "poky")
                         else:
@@ -152,7 +155,7 @@ class Command(NoArgsCommand):
 
 
 
-    def handle_noargs(self, **options):
+    def handle(self, **options):
         retval = 0
         retval += self._verify_build_environment()
         retval += self._verify_default_settings()
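
Both commands are ported off NoArgsCommand, which Django deprecated in 1.8 and removed in 1.10; BaseCommand's entry point is handle() rather than handle_noargs(). For reference, the minimal shape of a post-port management command:

    from django.core.management.base import BaseCommand

    class Command(BaseCommand):
        help = "Skeleton of a BaseCommand-style management command"

        def handle(self, *args, **options):
            # options to parse would be declared in add_arguments()
            self.stdout.write("settings verified")
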

+ 25 - 10
bitbake/lib/toaster/bldcontrol/management/commands/runbuilds.py

@@ -1,4 +1,4 @@
-from django.core.management.base import NoArgsCommand
+from django.core.management.base import BaseCommand
 from django.db import transaction
 from django.db.models import Q
 
@@ -11,10 +11,12 @@ from orm.models import Build, LogMessage, Target
 import logging
 import traceback
 import signal
+import os
 
 logger = logging.getLogger("toaster")
 
-class Command(NoArgsCommand):
+
+class Command(BaseCommand):
     args = ""
     help = "Schedules and executes build requests as possible. "\
            "Does not return (interrupt with Ctrl-C)"
@@ -50,7 +52,7 @@ class Command(NoArgsCommand):
                 logger.debug("runbuilds: No build env")
                 return
 
-            logger.info("runbuilds: starting build %s, environment %s" % \
+            logger.info("runbuilds: starting build %s, environment %s" %
                         (br, bec.be))
 
             # let the build request know where it is being executed
@@ -77,10 +79,18 @@ class Command(NoArgsCommand):
             br.save()
             bec.be.lock = BuildEnvironment.LOCK_FREE
             bec.be.save()
+            # Cancel the pending build and report the exception to the UI
+            log_object = LogMessage.objects.create(
+                            build=br.build,
+                            level=LogMessage.EXCEPTION,
+                            message=errmsg)
+            log_object.save()
+            br.build.outcome = Build.FAILED
+            br.build.save()
 
     def archive(self):
         for br in BuildRequest.objects.filter(state=BuildRequest.REQ_ARCHIVE):
-            if br.build == None:
+            if br.build is None:
                 br.state = BuildRequest.REQ_FAILED
             else:
                 br.state = BuildRequest.REQ_COMPLETED
@@ -99,10 +109,10 @@ class Command(NoArgsCommand):
             Q(updated__lt=timezone.now() - timedelta(seconds=30))
         ).update(lock=BuildEnvironment.LOCK_FREE)
 
-
         # update all Builds that were in progress and failed to start
-        for br in BuildRequest.objects.filter(state=BuildRequest.REQ_FAILED,
-                                              build__outcome=Build.IN_PROGRESS):
+        for br in BuildRequest.objects.filter(
+                state=BuildRequest.REQ_FAILED,
+                build__outcome=Build.IN_PROGRESS):
             # transpose the launch errors in ToasterExceptions
             br.build.outcome = Build.FAILED
             for brerror in br.brerror_set.all():
@@ -117,7 +127,6 @@ class Command(NoArgsCommand):
             br.environment.lock = BuildEnvironment.LOCK_FREE
             br.environment.save()
 
-
         # update all BuildRequests without a build created
         for br in BuildRequest.objects.filter(build=None):
             br.build = Build.objects.create(project=br.project,
@@ -144,7 +153,7 @@ class Command(NoArgsCommand):
 
         # Make sure the LOCK is removed for builds which have been fully
         # cancelled
-        for br in BuildRequest.objects.filter(\
+        for br in BuildRequest.objects.filter(
                       Q(build__outcome=Build.CANCELLED) &
                       Q(state=BuildRequest.REQ_CANCELLING) &
                       ~Q(environment=None)):
@@ -167,7 +176,13 @@ class Command(NoArgsCommand):
         except Exception as e:
             logger.warn("runbuilds: schedule exception %s" % str(e))
 
-    def handle_noargs(self, **options):
+    def handle(self, **options):
+        pidfile_path = os.path.join(os.environ.get("BUILDDIR", "."),
+                                    ".runbuilds.pid")
+
+        with open(pidfile_path, 'w') as pidfile:
+            pidfile.write("%s" % os.getpid())
+
         self.runbuild()
 
         signal.signal(signal.SIGUSR1, lambda sig, frame: None)
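
runbuilds now records its PID in $BUILDDIR/.runbuilds.pid and installs a no-op SIGUSR1 handler, so another process can nudge the scheduling loop without killing it. A hypothetical client-side poke, assuming the daemon is blocked in signal.pause() or a sleep between iterations:

    import os
    import signal

    def wake_runbuilds(builddir):
        # read the PID the daemon wrote at startup ...
        with open(os.path.join(builddir, ".runbuilds.pid")) as pidfile:
            pid = int(pidfile.read().strip())
        # ... and deliver SIGUSR1; the handler is a no-op, but delivery
        # interrupts a blocking pause() or sleep() in the daemon
        os.kill(pid, signal.SIGUSR1)
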

+ 0 - 141
bitbake/lib/toaster/bldcontrol/tests.py

@@ -1,141 +0,0 @@
-"""
-This file demonstrates writing tests using the unittest module. These will pass
-when you run "manage.py test".
-
-Replace this with more appropriate tests for your application.
-"""
-
-from django.test import TestCase
-
-from bldcontrol.bbcontroller import BitbakeController, BuildSetupException
-from bldcontrol.localhostbecontroller import LocalhostBEController
-from bldcontrol.models import BuildEnvironment, BuildRequest
-from bldcontrol.management.commands.runbuilds import Command
-
-import socket
-import subprocess
-import os
-
-# standard poky data hardcoded for testing
-BITBAKE_LAYER = type('bitbake_info', (object,), { "giturl": "git://git.yoctoproject.org/poky.git", "dirpath": "", "commit": "HEAD"})
-POKY_LAYERS = [
-    type('poky_info', (object,), { "name": "meta", "giturl": "git://git.yoctoproject.org/poky.git", "dirpath": "meta", "commit": "HEAD"}),
-    type('poky_info', (object,), { "name": "meta-yocto", "giturl": "git://git.yoctoproject.org/poky.git", "dirpath": "meta-yocto", "commit": "HEAD"}),
-    type('poky_info', (object,), { "name": "meta-yocto-bsp", "giturl": "git://git.yoctoproject.org/poky.git", "dirpath": "meta-yocto-bsp", "commit": "HEAD"}),
-    ]
-
-
-
-# we have an abstract test class designed to ensure that the controllers use a single interface
-# specific controller tests only need to override the _getBuildEnvironment() method
-
-test_sourcedir = os.getenv("TTS_SOURCE_DIR")
-test_builddir = os.getenv("TTS_BUILD_DIR")
-test_address = os.getenv("TTS_TEST_ADDRESS", "localhost")
-
-if test_sourcedir == None or test_builddir == None or test_address == None:
-    raise Exception("Please set TTTS_SOURCE_DIR, TTS_BUILD_DIR and TTS_TEST_ADDRESS")
-
-# The bb server will expect a toaster-pre.conf file to exist. If it doesn't exist then we make
-# an empty one here.
-open(test_builddir + 'conf/toaster-pre.conf', 'a').close()
-
-class BEControllerTests(object):
-
-    def _serverForceStop(self, bc):
-        err = bc._shellcmd("netstat  -tapn 2>/dev/null | grep 8200 | awk '{print $7}' | sort -fu | cut -d \"/\" -f 1 | grep -v -- - | tee /dev/fd/2 | xargs -r kill")
-        self.assertTrue(err == '', "bitbake server pid %s not stopped" % err)
-
-    def test_serverStartAndStop(self):
-        obe =  self._getBuildEnvironment()
-        bc = self._getBEController(obe)
-        try:
-            # setting layers, skip any layer info
-            bc.setLayers(BITBAKE_LAYER, POKY_LAYERS)
-        except NotImplementedError:
-            print("Test skipped due to command not implemented yet")
-            return True
-        # We are ok with the exception as we're handling the git already exists
-        except BuildSetupException:
-            pass
-
-        bc.pokydirname = test_sourcedir
-        bc.islayerset = True
-
-        hostname = test_address.split("@")[-1]
-
-        # test start server and stop
-        bc.startBBServer()
-
-        self.assertFalse(socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex((hostname, int(bc.be.bbport))), "Server not answering")
-
-        self._serverForceStop(bc)
-
-    def test_getBBController(self):
-        obe = self._getBuildEnvironment()
-        bc = self._getBEController(obe)
-        layerSet = False
-        try:
-            # setting layers, skip any layer info
-            layerSet = bc.setLayers(BITBAKE_LAYER, POKY_LAYERS)
-        except NotImplementedError:
-            print("Test skipped due to command not implemented yet")
-            return True
-        # We are ok with the exception as we're handling the git already exists
-        except BuildSetupException:
-            pass
-
-        bc.pokydirname = test_sourcedir
-        bc.islayerset = True
-
-        bbc = bc.getBBController()
-        self.assertTrue(isinstance(bbc, BitbakeController))
-
-        self._serverForceStop(bc)
-
-class LocalhostBEControllerTests(TestCase, BEControllerTests):
-    def __init__(self, *args):
-        super(LocalhostBEControllerTests, self).__init__(*args)
-
-
-    def _getBuildEnvironment(self):
-        return BuildEnvironment.objects.create(
-                lock = BuildEnvironment.LOCK_FREE,
-                betype = BuildEnvironment.TYPE_LOCAL,
-                address = test_address,
-                sourcedir = test_sourcedir,
-                builddir = test_builddir )
-
-    def _getBEController(self, obe):
-        return LocalhostBEController(obe)
-
-class RunBuildsCommandTests(TestCase):
-    def test_bec_select(self):
-        """
-        Tests that we can find and lock a build environment
-        """
-
-        obe = BuildEnvironment.objects.create(lock = BuildEnvironment.LOCK_FREE, betype = BuildEnvironment.TYPE_LOCAL)
-        command = Command()
-        bec = command._selectBuildEnvironment()
-
-        # make sure we select the object we've just built
-        self.assertTrue(bec.be.id == obe.id, "Environment is not properly selected")
-        # we have a locked environment
-        self.assertTrue(bec.be.lock == BuildEnvironment.LOCK_LOCK, "Environment is not locked")
-        # no more selections possible here
-        self.assertRaises(IndexError, command._selectBuildEnvironment)
-
-    def test_br_select(self):
-        from orm.models import Project, Release, BitbakeVersion, Branch
-        p = Project.objects.create_project("test", Release.objects.get_or_create(name = "HEAD", bitbake_version = BitbakeVersion.objects.get_or_create(name="HEAD", branch=Branch.objects.get_or_create(name="HEAD"))[0])[0])
-        obr = BuildRequest.objects.create(state = BuildRequest.REQ_QUEUED, project = p)
-        command = Command()
-        br = command._selectBuildRequest()
-
-        # make sure we select the object we've just built
-        self.assertTrue(obr.id == br.id, "Request is not properly selected")
-        # we have a locked environment
-        self.assertTrue(br.state == BuildRequest.REQ_INPROGRESS, "Request is not updated")
-        # no more selections possible here
-        self.assertRaises(IndexError, command._selectBuildRequest)

+ 0 - 6
bitbake/lib/toaster/contrib/README

@@ -1,6 +0,0 @@
-contrib directory for toaster
-
-This directory holds code that works with Toaster, without being an integral part of the Toaster project.
-It is intended for testing code, testing fixtures, tools for Toaster, etc.
-
-NOTE: This directory is NOT a Python module.

+ 0 - 41
bitbake/lib/toaster/contrib/tts/README

@@ -1,41 +0,0 @@
-
-Toaster Testing Framework
-Yocto Project
-
-
-Rationale
-------------
-As Toaster grows and more people contribute code, verifying each patch before it is submitted upstream becomes a problem that does not scale for humans. We devised this system to run patch-level validation automatically, eliminating common problems from submitted patches.
-
-The Toaster Testing Framework is a set of Python scripts that provides an extensible way to write smoke and regression tests that will be run on each patch set sent for review on the toaster mailing list.
-
-
-Usage
-------------
-There are three main executable scripts in this directory.
-    *    runner.py  is designed to be run from the command line. As a mandatory parameter it takes the name of a branch on poky-contrib that contains the patches to be tested. The program auto-discovers the available tests in this directory by looking for unittest classes, runs the tests on the branch, and dumps the output to standard output. Optionally, it can take parameters that inhibit the branch checkout, or that select a single test to run, for debugging purposes.
-    *    launcher.py  is designed to be run from a crontab or similar scheduling mechanism. It looks up a backlog file of branches-to-test (named tasks in the source code), selects the first one in FIFO order, and launches runner.py on it. It waits for completion, then emails the standard output and standard error dumps from the runner.py execution.
-    *    recv.py  is an email receiver, designed to be called as a pipe from a .forward file. It is used to monitor a mailing list, for example, and to add tasks to the backlog based on review requests arriving on the list.
-
-
-Installation
-------------
-As a prerequisite, we expect a functioning email system on a machine with Python 2.
-
-The broad steps to installation are:
-* set up the .forward on the receiving email account to pipe to the recv.py file
-* edit config.py and settings.json to alter for local installation settings
-* on email receive, verify backlog.txt to see that the tasks are received and marked for processing
-* execute launcher.py on the command line to verify that a test runs with no problems, and that the outgoing email is delivered
-* add launcher.py
-
-
-
-Contribute
-------------
-What we need are tests. Add your own tests either to the tests.py file or to a new file.
-Use "config.logger" to write logs that will make it to email.
-
-Commonly used code should go into shellutils, and configuration into config.py.
-
-Contribute code by emailing patches to the list: toaster@yoctoproject.org (membership required)

+ 0 - 9
bitbake/lib/toaster/contrib/tts/TODO

@@ -1,9 +0,0 @@
-We need to implement tests:
-
-automated link checker; currently
-$ linkchecker -t 1000 -F csv http://localhost:8000/
-
-integrate the w3c-validation service; currently
-$ python urlcheck.py
-
-

+ 0 - 98
bitbake/lib/toaster/contrib/tts/config.py

@@ -1,98 +0,0 @@
-#!/usr/bin/python
-
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (C) 2015 Alexandru Damian for Intel Corp.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-# This is the configuration/single module for tts
-# everything that would be a global variable goes here
-
-import os, sys, logging
-import socket
-
-LOGDIR = "log"
-SETTINGS_FILE = os.path.join(os.path.dirname(__file__), "settings.json")
-TEST_DIR_NAME = "tts_testdir"
-
-DEBUG = True
-
-OWN_PID = os.getpid()
-
-W3C_VALIDATOR = "http://icarus.local/w3c-validator/check?doctype=HTML5&uri="
-
-TOASTER_PORT = 56789
-
-TESTDIR = None
-
-#we parse the w3c URL to know where to connect
-
-import urlparse
-
-def get_public_ip():
-    temp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    parsed_url = urlparse.urlparse("http://icarus.local/w3c-validator/check?doctype=HTML5&uri=")
-    temp_socket.connect((parsed_url.netloc, 80 if parsed_url.port is None else parsed_url.port))
-    public_ip = temp_socket.getsockname()[0]
-    temp_socket.close()
-    return public_ip
-
-TOASTER_BASEURL = "http://%s:%d/" % (get_public_ip(), TOASTER_PORT)
-
-
-OWN_EMAIL_ADDRESS = "Toaster Testing Framework <alexandru.damian@intel.com>"
-REPORT_EMAIL_ADDRESS = "alexandru.damian@intel.com"
-
-# make sure we have the basic logging infrastructure
-
-#pylint: disable=invalid-name
-# we disable the invalid name because the module-level "logger" is used througout bitbake
-logger = logging.getLogger("toastertest")
-__console__ = logging.StreamHandler(sys.stdout)
-__console__.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s"))
-logger.addHandler(__console__)
-logger.setLevel(logging.DEBUG)
-
-
-# singleton file names
-LOCKFILE = "/tmp/ttf.lock"
-BACKLOGFILE = os.path.join(os.path.dirname(__file__), "backlog.txt")
-
-# task states
-def enum(*sequential, **named):
-    enums = dict(zip(sequential, range(len(sequential))), **named)
-    reverse = dict((value, key) for key, value in enums.items())
-    enums['reverse_mapping'] = reverse
-    return type('Enum', (), enums)
-
-
-class TASKS(object):
-    #pylint: disable=too-few-public-methods
-    PENDING = "PENDING"
-    INPROGRESS = "INPROGRESS"
-    DONE = "DONE"
-
-    @staticmethod
-    def next_task(task):
-        if task == TASKS.PENDING:
-            return TASKS.INPROGRESS
-        if task == TASKS.INPROGRESS:
-            return TASKS.DONE
-        raise Exception("Invalid next task state for %s" % task)
-
-# TTS specific
-CONTRIB_REPO = "git@git.yoctoproject.org:poky-contrib"
-

+ 0 - 101
bitbake/lib/toaster/contrib/tts/launcher.py

@@ -1,101 +0,0 @@
-#!/usr/bin/python
-
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (C) 2015 Alexandru Damian for Intel Corp.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-# Program to run the next task listed from the backlog.txt; designed to be
-# run from crontab.
-
-from __future__ import print_function
-import sys, os, config, shellutils
-from shellutils import ShellCmdException
-
-# Import smtplib for the actual sending function
-import smtplib
-
-# Import the email modules we'll need
-from email.mime.text import MIMEText
-
-def _take_lockfile():
-    return shellutils.lockfile(shellutils.mk_lock_filename())
-
-
-def read_next_task_by_state(task_state, task_name=None):
-    if not os.path.exists(os.path.join(os.path.dirname(__file__), config.BACKLOGFILE)):
-        return None
-    os.rename(config.BACKLOGFILE, config.BACKLOGFILE + ".tmp")
-    task = None
-    with open(config.BACKLOGFILE + ".tmp", "r") as f_in:
-        with open(config.BACKLOGFILE, "w") as f_out:
-            for line in f_in.readlines():
-                if task is None:
-                    fields = line.strip().split("|", 2)
-                    if fields[1] == task_state:
-                        if task_name is None or task_name == fields[0]:
-                            task = fields[0]
-                            print("Updating %s %s to %s" % (task, task_state, config.TASKS.next_task(task_state)))
-                            line = "%s|%s\n" % (task, config.TASKS.next_task(task_state))
-                f_out.write(line)
-    os.remove(config.BACKLOGFILE + ".tmp")
-    return task
-
-def send_report(task_name, plaintext, errtext=None):
-    if errtext is None:
-        msg = MIMEText(plaintext)
-    else:
-        if plaintext is None:
-            plaintext = ""
-        msg = MIMEText("--STDOUT dump--\n\n%s\n\n--STDERR dump--\n\n%s" % (plaintext, errtext))
-
-    msg['Subject'] = "[review-request] %s - smoke test results" % task_name
-    msg['From'] = config.OWN_EMAIL_ADDRESS
-    msg['To'] = config.REPORT_EMAIL_ADDRESS
-
-    smtp_connection = smtplib.SMTP("localhost")
-    smtp_connection.sendmail(config.OWN_EMAIL_ADDRESS, [config.REPORT_EMAIL_ADDRESS], msg.as_string())
-    smtp_connection.quit()
-
-def main():
-    # we don't do anything if we have another instance of us running
-    lock_file = _take_lockfile()
-
-    if lock_file is None:
-        if config.DEBUG:
-            print("Concurrent script in progress, exiting")
-        sys.exit(1)
-
-    next_task = read_next_task_by_state(config.TASKS.PENDING)
-    if next_task is not None:
-        print("Next task is", next_task)
-        errtext = None
-        out = None
-        try:
-            out = shellutils.run_shell_cmd("%s %s" % (os.path.join(os.path.dirname(__file__), "runner.py"), next_task))
-        except ShellCmdException as exc:
-            print("Failed while running the test runner: %s", exc)
-            errtext = exc.__str__()
-        send_report(next_task, out, errtext)
-        read_next_task_by_state(config.TASKS.INPROGRESS, next_task)
-    else:
-        print("No task")
-
-    shellutils.unlockfile(lock_file)
-
-
-if __name__ == "__main__":
-    main()

Some files were not shown because too many files changed in this diff