[RFC,4/5] meta: add mounts class

Message ID 20211012130413.1719424-5-adriaan.schmidt@siemens.com
State RFC
Series Refactor mount logic

Commit Message

Schmidt, Adriaan Oct. 12, 2021, 4:04 a.m. UTC
The new mounts.bbclass allows annotation of tasks, to describe which directories
need to be mounted. All mounting and unmounting is then done automatically,
and reference counting is used on a per-mountpoint basis to determine when
umounts need to happen.

Mounts are described as "cmd:src:dest", where cmd is
  * `bind` for a simple bind mount
  * `rbind` for a recursive bind mount
  * `pbind` for a "private" bind mount
  * `proc` for a "-t proc" mount

A task is annotated using the varflag [mounts].

If mounting should not happen automatically before/after the task, you can set
do_task[mounts-noauto] = "1", in which case you can manually call
`mounts_task_prefunc` and `mounts_task_postfunc` at more convenient times.
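
For example (hypothetical task name and paths, purely to illustrate the syntax; for
`proc` the source field is ignored but must still be present):

  do_example[mounts] = "bind:${WORKDIR}/cache:${BUILDCHROOT_DIR}/cache proc:none:${BUILDCHROOT_DIR}/proc"

A task that needs to control the timing itself would additionally set

  do_example[mounts-noauto] = "1"

and call `mounts_task_prefunc`/`mounts_task_postfunc` from the task at suitable points.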

Signed-off-by: Adriaan Schmidt <adriaan.schmidt@siemens.com>
---
 meta/classes/mounts.bbclass | 271 ++++++++++++++++++++++++++++++++++++
 meta/conf/bitbake.conf      |   2 +-
 2 files changed, 272 insertions(+), 1 deletion(-)
 create mode 100644 meta/classes/mounts.bbclass

Comments

Jan Kiszka Oct. 13, 2021, 1:31 a.m. UTC | #1
On 12.10.21 15:04, Adriaan Schmidt wrote:
> The new mounts.bbclass allows annotation of tasks, to describe which directories
> need to be mounted. All mounting and unmounting is then done automatically,
> and reference counting is used on a per-mountpoint basis to determine when
> umounts need to happen.
> 
> Mounts are described as "cmd:src:dest", where cmd is
>   * `bind` for a simple bind mount
>   * `rbind` for a recursive bind mount
>   * `pbind` for a "private" bind mount
>   * `proc` for a "-t proc" mount
> 
> A task is annotated using the varflag [mounts].
> 
> If mounting should not happen automatically before/after the task, you can set
> do_task[mounts-noauto] = "1", in which case you can manually call
> `mounts_task_prefunc` and `mounts_task_postfunc` at more convenient times.
> 
> Signed-off-by: Adriaan Schmidt <adriaan.schmidt@siemens.com>
> ---
>  meta/classes/mounts.bbclass | 271 ++++++++++++++++++++++++++++++++++++
>  meta/conf/bitbake.conf      |   2 +-
>  2 files changed, 272 insertions(+), 1 deletion(-)
>  create mode 100644 meta/classes/mounts.bbclass
> 
> diff --git a/meta/classes/mounts.bbclass b/meta/classes/mounts.bbclass
> new file mode 100644
> index 0000000..de2375e
> --- /dev/null
> +++ b/meta/classes/mounts.bbclass
> @@ -0,0 +1,271 @@
> +
> +python () {
> +    # find all tasks that request [mounts], and hook up our functions
> +    for task in [t for t in d.keys() if d.getVarFlag(t, 'task') and d.getVarFlag(t, 'mounts')]:
> +        if d.getVarFlag(task, 'mounts-noauto') == "1":
> +            continue
> +        d.prependVarFlag(task, 'prefuncs', "mounts_task_prefunc ")
> +        d.appendVarFlag(task, 'postfuncs', " mounts_task_postfunc")
> +}
> +
> +MOUNTS_DB = "${TMPDIR}/mounts"
> +MOUNTS_CONTEXT ?= "default"
> +MOUNTS_LOCK = "${MOUNTS_DB}/${MOUNTS_CONTEXT}.mountlock"
> +MOUNTS_TAB = "${MOUNTS_DB}/${MOUNTS_CONTEXT}.mounttab"
> +
> +def get_requested_mounts(d, task=None):
> +    if task is None:
> +        task = d.getVar('BB_CURRENTTASK')
> +        if not task:
> +            bb.fatal("mount code running without task context!?")
> +    if task.startswith("do_"):
> +        task = task[3:]
> +    mounts = (d.getVarFlag("do_" + task, 'mounts') or "").split()

You first strip the do_ prefix, only to prepend it again. Is that
intended? If so, maybe flip it around and only prepend the prefix if it is missing.

> +    mounts_out = []
> +    for m in mounts:
> +        ms = m.split(':')
> +        if len(ms) == 3 and ms[0] in 'bind rbind pbind proc'.split():
> +            mounts_out.append(ms)
> +        else:
> +            bb.error(f"Invalid mount spec: {':'.join(ms)}")
> +    return mounts_out
> +
> +def read_mtab(d, mtab_file=None):
> +    from collections import namedtuple
> +    Mount = namedtuple('Mount', 'cmd source target count')
> +    if mtab_file is None:
> +        mtab_file = d.getVar("MOUNTS_TAB", True)
> +    # mtab format is "cmd:source:target:count"
> +    try:
> +        with open(mtab_file, 'r') as f:
> +            data = [line.strip().split(':') for line in f.readlines()]
> +    except FileNotFoundError:
> +        return {}
> +    mounts = {}
> +    for m in data:
> +        if not len(m) == 4:
> +            bb.fatal("corrupt mtab!?")
> +        mt = Mount._make(m)
> +        mounts[mt.target] = mt._replace(count=int(mt.count))
> +    return mounts
> +
> +def write_mtab(d, mtab, mtab_file=None):
> +    if mtab_file is None:
> +        mtab_file = d.getVar("MOUNTS_TAB", True)
> +    with open(mtab_file, 'w') as f:
> +        for cmd, source, target, count in mtab.values():
> +            f.write(f"{cmd}:{source}:{target}:{count}\n")
> +
> +def shorten_path(x, n=3):
> +    xs = x.split('/')
> +    if len(xs) <= n:
> +        return '/'.join(xs)
> +    return '.../'+'/'.join(xs[-3:])

Hope this does not cut off any differentiating information, even when
just targeting logs.
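
E.g. (hypothetical paths), two different recipes' mount targets would collapse to
the same log string:

    shorten_path("/work/recipe-a/1.0-r0/rootfs/usr/bin")  # -> '.../rootfs/usr/bin'
    shorten_path("/work/recipe-b/1.0-r0/rootfs/usr/bin")  # -> '.../rootfs/usr/bin'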

> +
> +mount_bind() {
> +    sudo -s <<'EOSUDO'
> +        SOURCE="${@d.getVar('MOUNT_ARG_SOURCE')}"
> +        TARGET="${@d.getVar('MOUNT_ARG_TARGET')}"
> +        mkdir -p "$TARGET"
> +        mountpoint -q "$TARGET" || mount --bind "$SOURCE" "$TARGET"
> +EOSUDO
> +}
> +
> +umount_bind() {
> +    sudo -s <<'EOSUDO'
> +        TARGET="${@d.getVar('UMOUNT_ARG_TARGET')}"
> +        mountpoint -q "$TARGET" && umount "$TARGET"
> +EOSUDO
> +}
> +
> +mount_rbind() {
> +    sudo -s <<'EOSUDO'
> +        SOURCE="${@d.getVar('MOUNT_ARG_SOURCE')}"
> +        TARGET="${@d.getVar('MOUNT_ARG_TARGET')}"
> +        mkdir -p "$TARGET"
> +        mountpoint -q "$TARGET" || mount --rbind "$SOURCE" "$TARGET"
> +        mount --make-rslave "$TARGET"
> +EOSUDO
> +}
> +
> +umount_rbind() {
> +    sudo -s <<'EOSUDO'
> +        TARGET="${@d.getVar('UMOUNT_ARG_TARGET')}"
> +        mountpoint -q "$TARGET" && umount -R "$TARGET"
> +EOSUDO
> +}
> +
> +mount_pbind() {
> +    sudo -s <<'EOSUDO'
> +        SOURCE="${@d.getVar('MOUNT_ARG_SOURCE')}"
> +        TARGET="${@d.getVar('MOUNT_ARG_TARGET')}"
> +        mkdir -p "$TARGET"
> +        mountpoint -q "$TARGET" || mount --bind --make-private "$SOURCE" "$TARGET"
> +EOSUDO
> +}
> +
> +umount_pbind() {
> +    sudo -s <<'EOSUDO'
> +        TARGET="${@d.getVar('UMOUNT_ARG_TARGET')}"
> +        mountpoint -q "$TARGET" && umount "$TARGET"
> +EOSUDO
> +}
> +
> +mount_proc() {
> +    sudo -s <<'EOSUDO'
> +        TARGET="${@d.getVar('MOUNT_ARG_TARGET')}"
> +        mkdir -p "$TARGET"
> +        mountpoint -q "$TARGET" || mount -t proc none "$TARGET"
> +EOSUDO
> +}
> +
> +umount_proc() {
> +    sudo -s <<'EOSUDO'
> +        TARGET="${@d.getVar('UMOUNT_ARG_TARGET')}"
> +        mountpoint -q "$TARGET" && umount "$TARGET"
> +EOSUDO
> +}
> +
> +python mounts_task_prefunc () {
> +    from collections import namedtuple
> +    Mount = namedtuple('Mount', 'cmd source target count')
> +    task = d.getVar('PN') + ':' + d.getVar('BB_CURRENTTASK')
> +    lock = bb.utils.lockfile(d.getVar("MOUNTS_LOCK"))
> +    mounts = get_requested_mounts(d)
> +    mtab = read_mtab(d)
> +    for cmd, source, target in mounts:
> +        mt = mtab.get(target)
> +        if mt:
> +            count = mt.count + 1
> +            bb.debug(1, f"mount({task}): already mounted {shorten_path(mt.source)} at {shorten_path(mt.target)}, cnt={count}")
> +            mtab[target] = mt._replace(count=count)
> +            continue
> +        bb.debug(1, f"mount({task}): mounting {shorten_path(source)} at {shorten_path(target)}, cnt=1")
> +        d.setVar('MOUNT_ARG_SOURCE', source)
> +        d.setVar('MOUNT_ARG_TARGET', target)
> +        bb.build.exec_func('mount_' + cmd, d)

Could this fail and leave the lock blocked behind?

> +        mtab[target] = Mount(cmd, source, target, 1)
> +    write_mtab(d, mtab)
> +    bb.utils.unlockfile(lock)
> +}
> +
> +python mounts_task_postfunc () {
> +    task = d.getVar('PN') + ':' + d.getVar('BB_CURRENTTASK')
> +    lock = bb.utils.lockfile(d.getVar("MOUNTS_LOCK"))
> +    mounts = get_requested_mounts(d)
> +    mtab = read_mtab(d)
> +
> +    # release mounts
> +    for cmd, source, target in mounts:
> +        mt = mtab.get(target)
> +        if mt is None:
> +            bb.error(f"{target} not mounted. inconsistent mtab!?")
> +            continue
> +        count = mt.count - 1
> +        bb.debug(1, f"umount({task}): releasing {shorten_path(target)}, cnt={count}")
> +        mtab[target] = mt._replace(count=count)
> +
> +    # collect targets to unmount, in reverse order
> +    umounts = []
> +    for cmd, source, target in reversed(mounts):
> +        mt = mtab.get(target)
> +        if mt and mt.count == 0:
> +            umounts.append(target)
> +    for target, mt in mtab.items():
> +        if mt.count < 0:
> +            bb.error(f"count on {target} < 0. BUG!?!")
> +        elif mt.count == 0 and not target in umounts:
> +            umounts.append(target)
> +
> +    # now do the unmounting
> +    for target in umounts:
> +        try:
> +            bb.debug(1, f"umount({task}): unmounting {shorten_path(target)}")
> +            d.setVar('UMOUNT_ARG_TARGET', target)
> +            bb.build.exec_func('umount_' + mt.cmd, d)
> +            del mtab[target]
> +        except bb.process.ExecutionError as e:
> +            if e.exitcode == 32:
> +                # target busy
> +                bb.debug(1, f"umount({task}): target busy, moving on...")
> +            else:
> +                bb.warn(f"umount({task}): failed to unmount {target}: {str(e)}")
> +
> +    write_mtab(d, mtab)
> +    bb.utils.unlockfile(lock)
> +}
> +
> +# call postfunc explicitly in case a failing task has [mounts]
> +addhandler mounts_taskfail
> +python mounts_taskfail() {
> +    task = d.getVar('BB_CURRENTTASK')
> +    if not task:
> +        bb.fatal("mount code running without task context!?")
> +    if task.startswith("do_"):
> +        task = task[3:]
> +    if d.getVarFlag("do_" + task, 'mounts') and not d.getVarFlag("do_" + task, 'mounts-noauto') == "1":
> +        bb.build.exec_func('mounts_task_postfunc', d)
> +}
> +mounts_taskfail[eventmask] = "bb.build.TaskFailed"
> +
> +# bb.event.Build* handlers don't have a task context.
> +# Don't access MOUNTS_CONTEXT from here!
> +addhandler mounts_init
> +python mounts_init() {
> +    bb.utils.remove(d.getVar('MOUNTS_DB') + "/*.mounttab")
> +    bb.utils.remove(d.getVar('MOUNTS_DB') + "/*.mountlock")
> +}
> +mounts_init[eventmask] = "bb.event.BuildStarted"
> +
> +addhandler mounts_cleanup
> +python mounts_cleanup() {
> +    # look through MOUNTS_DB for contexts
> +    import glob
> +    import time
> +    base = d.getVar('MOUNTS_DB')
> +    locks = glob.glob(base + "/*.mountlock")
> +    tabs = glob.glob(base + "/*.mounttab")
> +
> +    # there should not be any locks?
> +    if len(locks) > 0:
> +        bb.error(f"mounts_cleanup: someone still holding lock? ({str(locks)})")
> +
> +    # cleanup any existing contexts
> +    for mtab_file in tabs:
> +        mtab = read_mtab(d, mtab_file)
> +        if len(mtab) > 0:
> +            bb.note(f"mounts_cleanup: {mtab_file.split('/')[-1]}")
> +
> +        done = []
> +        for target, mt in mtab.items():
> +            if mt.count < 0:
> +                bb.error(f"count on {target} < 0. BUG!?!")
> +                continue
> +            if mt.count > 0:
> +                bb.error(f"count on {target} > 0. BUG!?!")
> +
> +            bb.note(f"mounts_cleanup: unmounting {target}")
> +            for i in range(10):
> +                try:
> +                    d.setVar('UMOUNT_ARG_TARGET', target)
> +                    bb.build.exec_func('umount_' + mt.cmd, d)
> +                    done.append(target)
> +                    break
> +                except bb.process.ExecutionError as e:
> +                    if e.exitcode == 32:
> +                        # target busy
> +                        time.sleep(1)
> +                        continue
> +                    else:
> +                        bb.error(f"umount({task}): {str(e)}")
> +                        done.append(target)
> +                        break
> +                bb.warn(f"mounts_cleanup: failed to umount {target}")
> +                done.append(target)
> +
> +        for target in done:
> +            del mtab[target]
> +        write_mtab(d, mtab, mtab_file)
> +}
> +
> +mounts_cleanup[eventmask] = "bb.event.BuildCompleted"
> diff --git a/meta/conf/bitbake.conf b/meta/conf/bitbake.conf
> index 7f5901d..4726eaf 100644
> --- a/meta/conf/bitbake.conf
> +++ b/meta/conf/bitbake.conf
> @@ -113,7 +113,7 @@ PARALLEL_MAKE ?= "-j ${@bb.utils.cpu_count()}"
>  BBINCLUDELOGS ??= "yes"
>  
>  # Add event handlers for bitbake
> -INHERIT += "isar-events"
> +INHERIT += "mounts isar-events"
>  
>  include conf/local.conf
>  include conf/multiconfig/${BB_CURRENT_MC}.conf
> 

With all this in place, did you see the warning in build_completed() triggering?

Jan
Schmidt, Adriaan Oct. 19, 2021, 10:02 p.m. UTC | #2
On 2021-10-13 12:31, Jan Kiszka wrote:
> On 12.10.21 15:04, Adriaan Schmidt wrote:
> > +def get_requested_mounts(d, task=None):
> > +    if task is None:
> > +        task = d.getVar('BB_CURRENTTASK')
> > +        if not task:
> > +            bb.fatal("mount code running without task context!?")
> > +    if task.startswith("do_"):
> > +        task = task[3:]
> > +    mounts = (d.getVarFlag("do_" + task, 'mounts') or "").split()
> 
> You first strip the do_ prefix, only to prepend it again. Is that
> indended? If so, maybe flip it around and prepend this if missing.

This is how OE's sstate.bbclass does it. I agree it's ugly, but turning it around does not look much better either.
It's about accepting task names with and without "do_", but I'm not even sure both cases can happen here. I'll have another look and clean this up.
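
Something like this (untested sketch) would avoid the strip-then-prepend dance:

    # normalize to the "do_"-prefixed form once, then use it directly
    if not task.startswith("do_"):
        task = "do_" + task
    mounts = (d.getVarFlag(task, 'mounts') or "").split()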

> > +def shorten_path(x, n=3):
> > +    xs = x.split('/')
> > +    if len(xs) <= n:
> > +        return '/'.join(xs)
> > +    return '.../'+'/'.join(xs[-3:])
> 
> Hope this does not cut off any differentiating information, even when
> just targeting logs.

It might. During development it makes reading the prints much easier, but for proper logs we want the complete paths (will change for the next version).

> > +python mounts_task_prefunc () {
> > +    from collections import namedtuple
> > +    Mount = namedtuple('Mount', 'cmd source target count')
> > +    task = d.getVar('PN') + ':' + d.getVar('BB_CURRENTTASK')
> > +    lock = bb.utils.lockfile(d.getVar("MOUNTS_LOCK"))
> > +    mounts = get_requested_mounts(d)
> > +    mtab = read_mtab(d)
> > +    for cmd, source, target in mounts:
> > +        mt = mtab.get(target)
> > +        if mt:
> > +            count = mt.count + 1
> > +            bb.debug(1, f"mount({task}): already mounted {shorten_path(mt.source)} at {shorten_path(mt.target)}, cnt={count}")
> > +            mtab[target] = mt._replace(count=count)
> > +            continue
> > +        bb.debug(1, f"mount({task}): mounting {shorten_path(source)} at {shorten_path(target)}, cnt=1")
> > +        d.setVar('MOUNT_ARG_SOURCE', source)
> > +        d.setVar('MOUNT_ARG_TARGET', target)
> > +        bb.build.exec_func('mount_' + cmd, d)
> 
> Could this fail and leave the lock blocked behind?

Oh yes, good point! This also needs a try/catch, just like the umounts.
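
One possible shape for the next version (untested sketch, reusing the helpers and the
Mount namedtuple from the existing prefunc) keeps the lock release in a finally block,
so a failing mount helper cannot leave the lock behind:

    lock = bb.utils.lockfile(d.getVar("MOUNTS_LOCK"))
    try:
        mtab = read_mtab(d)
        for cmd, source, target in get_requested_mounts(d):
            mt = mtab.get(target)
            if mt:
                mtab[target] = mt._replace(count=mt.count + 1)
                continue
            d.setVar('MOUNT_ARG_SOURCE', source)
            d.setVar('MOUNT_ARG_TARGET', target)
            # may raise bb.process.ExecutionError if the mount helper fails
            bb.build.exec_func('mount_' + cmd, d)
            mtab[target] = Mount(cmd, source, target, 1)
        write_mtab(d, mtab)
    finally:
        bb.utils.unlockfile(lock)

Whether the mounts that did succeed before the failure should still be recorded in the
mtab is a separate question.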

> > +addhandler mounts_cleanup
> > +python mounts_cleanup() {
> > +    # look through MOUNTS_DB for contexts
> > +    import glob
> > +    import time
> > +    base = d.getVar('MOUNTS_DB')
> > +    locks = glob.glob(base + "/*.mountlock")
> > +    tabs = glob.glob(base + "/*.mounttab")
> > +
> > +    # there should not be any locks?
> > +    if len(locks) > 0:
> > +        bb.error(f"mounts_cleanup: someone still holding lock? ({str(locks)})")
> > +
> > +    # cleanup any existing contexts
> > +    for mtab_file in tabs:
> > +        mtab = read_mtab(d, mtab_file)
> > +        if len(mtab) > 0:
> > +            bb.note(f"mounts_cleanup: {mtab_file.split('/')[-1]}")
> > +
> > +        done = []
> > +        for target, mt in mtab.items():
> > +            if mt.count < 0:
> > +                bb.error(f"count on {target} < 0. BUG!?!")
> > +                continue
> > +            if mt.count > 0:
> > +                bb.error(f"count on {target} > 0. BUG!?!")
> > +
> > +            bb.note(f"mounts_cleanup: unmounting {target}")
> > +            for i in range(10):
> > +                try:
> > +                    d.setVar('UMOUNT_ARG_TARGET', target)
> > +                    bb.build.exec_func('umount_' + mt.cmd, d)
> > +                    done.append(target)
> > +                    break
> > +                except bb.process.ExecutionError as e:
> > +                    if e.exitcode == 32:
> > +                        # target busy
> > +                        time.sleep(1)
> > +                        continue
> > +                    else:
> > +                        bb.error(f"umount({task}): {str(e)}")
> > +                        done.append(target)
> > +                        break
> > +                bb.warn(f"mounts_cleanup: failed to umount {target}")
> > +                done.append(target)
> > +
> > +        for target in done:
> > +            del mtab[target]
> > +        write_mtab(d, mtab, mtab_file)
> > +}
> > +
> > +mounts_cleanup[eventmask] = "bb.event.BuildCompleted"
> > diff --git a/meta/conf/bitbake.conf b/meta/conf/bitbake.conf
> > index 7f5901d..4726eaf 100644
> > --- a/meta/conf/bitbake.conf
> > +++ b/meta/conf/bitbake.conf
> > @@ -113,7 +113,7 @@ PARALLEL_MAKE ?= "-j ${@bb.utils.cpu_count()}"
> >  BBINCLUDELOGS ??= "yes"
> >
> >  # Add event handlers for bitbake
> > -INHERIT += "isar-events"
> > +INHERIT += "mounts isar-events"
> >
> >  include conf/local.conf
> >  include conf/multiconfig/${BB_CURRENT_MC}.conf
> >
> 
> With all this in place, did you see the warning in build_completed() triggering?

In "normal" cases, the cleanup code of the new mounts class is already doing it's job. I have still seen issues in the ci tests (at least in the pre-avocado variant), which may be due to multiconfig and the concurrency/sharing it brings. This needs some more work on my side.

Adriaan

Patch

diff --git a/meta/classes/mounts.bbclass b/meta/classes/mounts.bbclass
new file mode 100644
index 0000000..de2375e
--- /dev/null
+++ b/meta/classes/mounts.bbclass
@@ -0,0 +1,271 @@ 
+
+python () {
+    # find all tasks that request [mounts], and hook up our functions
+    for task in [t for t in d.keys() if d.getVarFlag(t, 'task') and d.getVarFlag(t, 'mounts')]:
+        if d.getVarFlag(task, 'mounts-noauto') == "1":
+            continue
+        d.prependVarFlag(task, 'prefuncs', "mounts_task_prefunc ")
+        d.appendVarFlag(task, 'postfuncs', " mounts_task_postfunc")
+}
+
+MOUNTS_DB = "${TMPDIR}/mounts"
+MOUNTS_CONTEXT ?= "default"
+MOUNTS_LOCK = "${MOUNTS_DB}/${MOUNTS_CONTEXT}.mountlock"
+MOUNTS_TAB = "${MOUNTS_DB}/${MOUNTS_CONTEXT}.mounttab"
+
+def get_requested_mounts(d, task=None):
+    if task is None:
+        task = d.getVar('BB_CURRENTTASK')
+        if not task:
+            bb.fatal("mount code running without task context!?")
+    if task.startswith("do_"):
+        task = task[3:]
+    mounts = (d.getVarFlag("do_" + task, 'mounts') or "").split()
+    mounts_out = []
+    for m in mounts:
+        ms = m.split(':')
+        if len(ms) == 3 and ms[0] in 'bind rbind pbind proc'.split():
+            mounts_out.append(ms)
+        else:
+            bb.error(f"Invalid mount spec: {':'.join(ms)}")
+    return mounts_out
+
+def read_mtab(d, mtab_file=None):
+    from collections import namedtuple
+    Mount = namedtuple('Mount', 'cmd source target count')
+    if mtab_file is None:
+        mtab_file = d.getVar("MOUNTS_TAB", True)
+    # mtab format is "cmd:source:target:count"
+    try:
+        with open(mtab_file, 'r') as f:
+            data = [line.strip().split(':') for line in f.readlines()]
+    except FileNotFoundError:
+        return {}
+    mounts = {}
+    for m in data:
+        if not len(m) == 4:
+            bb.fatal("corrupt mtab!?")
+        mt = Mount._make(m)
+        mounts[mt.target] = mt._replace(count=int(mt.count))
+    return mounts
+
+def write_mtab(d, mtab, mtab_file=None):
+    if mtab_file is None:
+        mtab_file = d.getVar("MOUNTS_TAB", True)
+    with open(mtab_file, 'w') as f:
+        for cmd, source, target, count in mtab.values():
+            f.write(f"{cmd}:{source}:{target}:{count}\n")
+
+def shorten_path(x, n=3):
+    xs = x.split('/')
+    if len(xs) <= n:
+        return '/'.join(xs)
+    return '.../'+'/'.join(xs[-3:])
+
+mount_bind() {
+    sudo -s <<'EOSUDO'
+        SOURCE="${@d.getVar('MOUNT_ARG_SOURCE')}"
+        TARGET="${@d.getVar('MOUNT_ARG_TARGET')}"
+        mkdir -p "$TARGET"
+        mountpoint -q "$TARGET" || mount --bind "$SOURCE" "$TARGET"
+EOSUDO
+}
+
+umount_bind() {
+    sudo -s <<'EOSUDO'
+        TARGET="${@d.getVar('UMOUNT_ARG_TARGET')}"
+        mountpoint -q "$TARGET" && umount "$TARGET"
+EOSUDO
+}
+
+mount_rbind() {
+    sudo -s <<'EOSUDO'
+        SOURCE="${@d.getVar('MOUNT_ARG_SOURCE')}"
+        TARGET="${@d.getVar('MOUNT_ARG_TARGET')}"
+        mkdir -p "$TARGET"
+        mountpoint -q "$TARGET" || mount --rbind "$SOURCE" "$TARGET"
+        mount --make-rslave "$TARGET"
+EOSUDO
+}
+
+umount_rbind() {
+    sudo -s <<'EOSUDO'
+        TARGET="${@d.getVar('UMOUNT_ARG_TARGET')}"
+        mountpoint -q "$TARGET" && umount -R "$TARGET"
+EOSUDO
+}
+
+mount_pbind() {
+    sudo -s <<'EOSUDO'
+        SOURCE="${@d.getVar('MOUNT_ARG_SOURCE')}"
+        TARGET="${@d.getVar('MOUNT_ARG_TARGET')}"
+        mkdir -p "$TARGET"
+        mountpoint -q "$TARGET" || mount --bind --make-private "$SOURCE" "$TARGET"
+EOSUDO
+}
+
+umount_pbind() {
+    sudo -s <<'EOSUDO'
+        TARGET="${@d.getVar('UMOUNT_ARG_TARGET')}"
+        mountpoint -q "$TARGET" && umount "$TARGET"
+EOSUDO
+}
+
+mount_proc() {
+    sudo -s <<'EOSUDO'
+        TARGET="${@d.getVar('MOUNT_ARG_TARGET')}"
+        mkdir -p "$TARGET"
+        mountpoint -q "$TARGET" || mount -t proc none "$TARGET"
+EOSUDO
+}
+
+umount_proc() {
+    sudo -s <<'EOSUDO'
+        TARGET="${@d.getVar('UMOUNT_ARG_TARGET')}"
+        mountpoint -q "$TARGET" && umount "$TARGET"
+EOSUDO
+}
+
+python mounts_task_prefunc () {
+    from collections import namedtuple
+    Mount = namedtuple('Mount', 'cmd source target count')
+    task = d.getVar('PN') + ':' + d.getVar('BB_CURRENTTASK')
+    lock = bb.utils.lockfile(d.getVar("MOUNTS_LOCK"))
+    mounts = get_requested_mounts(d)
+    mtab = read_mtab(d)
+    for cmd, source, target in mounts:
+        mt = mtab.get(target)
+        if mt:
+            count = mt.count + 1
+            bb.debug(1, f"mount({task}): already mounted {shorten_path(mt.source)} at {shorten_path(mt.target)}, cnt={count}")
+            mtab[target] = mt._replace(count=count)
+            continue
+        bb.debug(1, f"mount({task}): mounting {shorten_path(source)} at {shorten_path(target)}, cnt=1")
+        d.setVar('MOUNT_ARG_SOURCE', source)
+        d.setVar('MOUNT_ARG_TARGET', target)
+        bb.build.exec_func('mount_' + cmd, d)
+        mtab[target] = Mount(cmd, source, target, 1)
+    write_mtab(d, mtab)
+    bb.utils.unlockfile(lock)
+}
+
+python mounts_task_postfunc () {
+    task = d.getVar('PN') + ':' + d.getVar('BB_CURRENTTASK')
+    lock = bb.utils.lockfile(d.getVar("MOUNTS_LOCK"))
+    mounts = get_requested_mounts(d)
+    mtab = read_mtab(d)
+
+    # release mounts
+    for cmd, source, target in mounts:
+        mt = mtab.get(target)
+        if mt is None:
+            bb.error(f"{target} not mounted. inconsistent mtab!?")
+            continue
+        count = mt.count - 1
+        bb.debug(1, f"umount({task}): releasing {shorten_path(target)}, cnt={count}")
+        mtab[target] = mt._replace(count=count)
+
+    # collect targets to unmount, in reverse order
+    umounts = []
+    for cmd, source, target in reversed(mounts):
+        mt = mtab.get(target)
+        if mt and mt.count == 0:
+            umounts.append(target)
+    for target, mt in mtab.items():
+        if mt.count < 0:
+            bb.error(f"count on {target} < 0. BUG!?!")
+        elif mt.count == 0 and not target in umounts:
+            umounts.append(target)
+
+    # now do the unmounting
+    for target in umounts:
+        try:
+            bb.debug(1, f"umount({task}): unmounting {shorten_path(target)}")
+            d.setVar('UMOUNT_ARG_TARGET', target)
+            bb.build.exec_func('umount_' + mt.cmd, d)
+            del mtab[target]
+        except bb.process.ExecutionError as e:
+            if e.exitcode == 32:
+                # target busy
+                bb.debug(1, f"umount({task}): target busy, moving on...")
+            else:
+                bb.warn(f"umount({task}): failed to unmount {target}: {str(e)}")
+
+    write_mtab(d, mtab)
+    bb.utils.unlockfile(lock)
+}
+
+# call postfunc explicitly in case a failing task has [mounts]
+addhandler mounts_taskfail
+python mounts_taskfail() {
+    task = d.getVar('BB_CURRENTTASK')
+    if not task:
+        bb.fatal("mount code running without task context!?")
+    if task.startswith("do_"):
+        task = task[3:]
+    if d.getVarFlag("do_" + task, 'mounts') and not d.getVarFlag("do_" + task, 'mounts-noauto') == "1":
+        bb.build.exec_func('mounts_task_postfunc', d)
+}
+mounts_taskfail[eventmask] = "bb.build.TaskFailed"
+
+# bb.event.Build* handlers don't have a task context.
+# Don't access MOUNTS_CONTEXT from here!
+addhandler mounts_init
+python mounts_init() {
+    bb.utils.remove(d.getVar('MOUNTS_DB') + "/*.mounttab")
+    bb.utils.remove(d.getVar('MOUNTS_DB') + "/*.mountlock")
+}
+mounts_init[eventmask] = "bb.event.BuildStarted"
+
+addhandler mounts_cleanup
+python mounts_cleanup() {
+    # look through MOUNTS_DB for contexts
+    import glob
+    import time
+    base = d.getVar('MOUNTS_DB')
+    locks = glob.glob(base + "/*.mountlock")
+    tabs = glob.glob(base + "/*.mounttab")
+
+    # there should not be any locks?
+    if len(locks) > 0:
+        bb.error(f"mounts_cleanup: someone still holding lock? ({str(locks)})")
+
+    # cleanup any existing contexts
+    for mtab_file in tabs:
+        mtab = read_mtab(d, mtab_file)
+        if len(mtab) > 0:
+            bb.note(f"mounts_cleanup: {mtab_file.split('/')[-1]}")
+
+        done = []
+        for target, mt in mtab.items():
+            if mt.count < 0:
+                bb.error(f"count on {target} < 0. BUG!?!")
+                continue
+            if mt.count > 0:
+                bb.error(f"count on {target} > 0. BUG!?!")
+
+            bb.note(f"mounts_cleanup: unmounting {target}")
+            for i in range(10):
+                try:
+                    d.setVar('UMOUNT_ARG_TARGET', target)
+                    bb.build.exec_func('umount_' + mt.cmd, d)
+                    done.append(target)
+                    break
+                except bb.process.ExecutionError as e:
+                    if e.exitcode == 32:
+                        # target busy
+                        time.sleep(1)
+                        continue
+                    else:
+                        bb.error(f"umount({task}): {str(e)}")
+                        done.append(target)
+                        break
+                bb.warn(f"mounts_cleanup: failed to umount {target}")
+                done.append(target)
+
+        for target in done:
+            del mtab[target]
+        write_mtab(d, mtab, mtab_file)
+}
+
+mounts_cleanup[eventmask] = "bb.event.BuildCompleted"
diff --git a/meta/conf/bitbake.conf b/meta/conf/bitbake.conf
index 7f5901d..4726eaf 100644
--- a/meta/conf/bitbake.conf
+++ b/meta/conf/bitbake.conf
@@ -113,7 +113,7 @@  PARALLEL_MAKE ?= "-j ${@bb.utils.cpu_count()}"
 BBINCLUDELOGS ??= "yes"
 
 # Add event handlers for bitbake
-INHERIT += "isar-events"
+INHERIT += "mounts isar-events"
 
 include conf/local.conf
 include conf/multiconfig/${BB_CURRENT_MC}.conf