#!/usr/bin/perl
#
# btrbk - Create snapshots and remote backups of btrfs subvolumes
#
# Copyright (C) 2014-2021 Axel Burri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------
# The official btrbk website is located at:
# https://digint.ch/btrbk/
#
# Author:
# Axel Burri <axel@tty0.ch>
# ---------------------------------------------------------------------

use strict;
use warnings FATAL => qw( all ), NONFATAL => qw( deprecated );

use Carp qw(confess);
use Getopt::Long qw(GetOptions);
use Time::Local qw( timelocal timegm timegm_nocheck );
use IPC::Open3 qw(open3);
use Symbol qw(gensym);

# Program identity constants (exported as package globals for use in
# error messages and the --version banner).
our $VERSION         = '0.31.3';
our $AUTHOR          = 'Axel Burri <axel@tty0.ch>';
our $PROJECT_HOME    = '<https://digint.ch/btrbk/>';

our $BTRFS_PROGS_MIN = '4.12';  # required since btrbk-v0.27.0

# One-line banner printed by VERSION_MESSAGE() and --version.
my  $VERSION_INFO    = "btrbk command line client, version $VERSION";


# Default configuration file locations, tried in this order.
my @config_src = ("/etc/btrbk.conf", "/etc/btrbk/btrbk.conf");

# Supported compression algorithms for stream_compress / raw_target_compress.
# "format" is the filename extension used for raw target files; the optional
# "threads" / "long" / "adapt" entries hold the command-line switches for
# algorithms that support them (only zstd supports all three).
my %compression = (
  # NOTE: also adapt "compress_list" in ssh_filter_btrbk.sh if you change this
  gzip   => { name => 'gzip',   format => 'gz',  compress_cmd => [ 'gzip',   '-c' ], decompress_cmd => [ 'gzip',   '-d', '-c' ], level_min => 1, level_max => 9 },
  pigz   => { name => 'pigz',   format => 'gz',  compress_cmd => [ 'pigz',   '-c' ], decompress_cmd => [ 'pigz',   '-d', '-c' ], level_min => 1, level_max => 9, threads => '-p' },
  bzip2  => { name => 'bzip2',  format => 'bz2', compress_cmd => [ 'bzip2',  '-c' ], decompress_cmd => [ 'bzip2',  '-d', '-c' ], level_min => 1, level_max => 9 },
  pbzip2 => { name => 'pbzip2', format => 'bz2', compress_cmd => [ 'pbzip2', '-c' ], decompress_cmd => [ 'pbzip2', '-d', '-c' ], level_min => 1, level_max => 9, threads => '-p' },
  xz     => { name => 'xz',     format => 'xz',  compress_cmd => [ 'xz',     '-c' ], decompress_cmd => [ 'xz',     '-d', '-c' ], level_min => 0, level_max => 9, threads => '-T' },
  lzo    => { name => 'lzo',    format => 'lzo', compress_cmd => [ 'lzop',   '-c' ], decompress_cmd => [ 'lzop',   '-d', '-c' ], level_min => 1, level_max => 9 },
  lz4    => { name => 'lz4',    format => 'lz4', compress_cmd => [ 'lz4',    '-c' ], decompress_cmd => [ 'lz4',    '-d', '-c' ], level_min => 1, level_max => 9 },
  zstd   => { name => 'zstd',   format => 'zst', compress_cmd => [ 'zstd',   '-c' ], decompress_cmd => [ 'zstd',   '-d', '-c' ], level_min => 1, level_max => 19, threads => '-T', long => '--long=', adapt => '--adapt' },
 );

# Precompiled regex fragments used throughout the parser and file matching.
# Alternation of all known compressed-file extensions (gz|gz|bz2|...):
my $compress_format_alt = join '|', map { $_->{format} } values %compression; # note: this contains duplicate alternations
my $ipv4_addr_match = qr/(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])/;
my $ipv6_addr_match = qr/[a-fA-F0-9]*:[a-fA-F0-9]*:[a-fA-F0-9:]+/; # simplified (contains at least two colons), matches "::1", "2001:db8::7"
my $host_name_match = qr/(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])/;  # dot-separated labels of alphanumerics/hyphens (no leading/trailing hyphen)
my $file_match = qr/[0-9a-zA-Z_@\+\-\.\/]+/;  # note: ubuntu uses '@' in the subvolume layout: <https://help.ubuntu.com/community/btrfs>
my $glob_match = qr/[0-9a-zA-Z_@\+\-\.\/\*]+/;  # file_match plus '*'
my $uuid_match = qr/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/;  # lowercase canonical UUID
my $btrbk_timestamp_match = qr/(?<YYYY>[0-9]{4})(?<MM>[0-9]{2})(?<DD>[0-9]{2})(T(?<hh>[0-9]{2})(?<mm>[0-9]{2})((?<ss>[0-9]{2})(?<zz>(Z|[+-][0-9]{4})))?)?(_(?<NN>[0-9]+))?/;  # matches "YYYYMMDD[Thhmm[ss+0000]][_NN]"
my $raw_postfix_match_DEPRECATED = qr/--(?<received_uuid>$uuid_match)(\@(?<parent_uuid>$uuid_match))?\.btrfs?(\.(?<compress>($compress_format_alt)))?(\.(?<encrypt>gpg))?(\.(?<split>split))?(\.(?<incomplete>part))?/;  # matches ".btrfs_<received_uuid>[@<parent_uuid>][.gz|bz2|xz][.gpg][.split][.part]"
my $raw_postfix_match = qr/\.btrfs(\.($compress_format_alt))?(\.(gpg|encrypted))?/;  # matches ".btrfs[.gz|bz2|xz][.gpg|encrypted]"

my $group_match = qr/[a-zA-Z0-9_:-]+/;  # "group" configuration option values
my $ssh_cipher_match = qr/[a-z0-9][a-z0-9@.-]+/;  # single cipher name for ssh_cipher_spec

my %day_of_week_map = ( sunday => 0, monday => 1, tuesday => 2, wednesday => 3, thursday => 4, friday => 5, saturday => 6 );
my @syslog_facilities = qw( user mail daemon auth lpr news cron authpriv local0 local1 local2 local3 local4 local5 local6 local7 );

# Configuration option table. Each entry describes how the config parser
# validates and defaults an option:
#   default / c_default      default value (c_default: computed at runtime)
#   accept                   list of accepted literal values
#   accept_regexp            regexp matching additional accepted values
#   accept_numeric           accept plain numeric values
#   accept_file              accept file/path values (with constraints)
#   accept_preserve_matrix   accept retention ("preserve") expressions
#   context                  config sections where the option is allowed
#   allow_multiple / split   option may repeat / value is a list
#   require_bin              external binary required when option is set
#   deprecated               warning / migration rules for legacy options
my %config_options = (
  # NOTE: the parser always maps "no" to undef
  # NOTE: keys "volume", "subvolume" and "target" are hardcoded
  # NOTE: files "." and "no" map to <undef>
  timestamp_format            => { default => "short",   accept => [ "short", "long", "long-iso" ], context => [ "global", "volume", "subvolume" ] },
  snapshot_dir                => { default => undef,     accept_file => { relative => 1 }, context => [ "global", "volume", "subvolume" ] },
  snapshot_name               => { c_default => 1,       accept_file => { name_only => 1 }, context => [ "subvolume" ], deny_glob_context => 1 },   # NOTE: defaults to the subvolume name (hardcoded)
  snapshot_create             => { default => "always",  accept => [ "no", "always", "ondemand", "onchange" ], context => [ "global", "volume", "subvolume" ] },
  incremental                 => { default => "yes",     accept => [ "yes", "no", "strict" ] },
  incremental_clones          => { default => 0,         accept_numeric => 1 },
  incremental_resolve         => { default => "mountpoint", accept => [ "mountpoint", "directory", "_all_accessible" ] },
  preserve_day_of_week        => { default => "sunday",  accept => [ (keys %day_of_week_map) ] },
  preserve_hour_of_day        => { default => 0,         accept => [ (0..23) ]  },
  snapshot_preserve           => { default => undef,     accept => [ "no" ], accept_preserve_matrix => 1, context => [ "global", "volume", "subvolume" ], },
  snapshot_preserve_min       => { default => "all",     accept => [ "all", "latest" ], accept_regexp => qr/^[1-9][0-9]*[hdwmy]$/, context => [ "global", "volume", "subvolume" ], },
  target_preserve             => { default => undef,     accept => [ "no" ], accept_preserve_matrix => 1 },
  target_preserve_min         => { default => "all",     accept => [ "all", "latest", "no" ], accept_regexp => qr/^[0-9]+[hdwmy]$/ },
  archive_preserve            => { default => undef,     accept => [ "no" ], accept_preserve_matrix => 1, context => [ "global" ] },
  archive_preserve_min        => { default => "all",     accept => [ "all", "latest", "no" ], accept_regexp => qr/^[0-9]+[hdwmy]$/, context => [ "global" ] },
  btrfs_commit_delete         => { default => undef,     accept => [ "after", "each", "no" ] },
  ssh_identity                => { default => undef,     accept_file => { absolute => 1 } },
  ssh_user                    => { default => "root",    accept_regexp => qr/^[a-z_][a-z0-9_-]*$/ },
  ssh_compression             => { default => undef,     accept => [ "yes", "no" ] },
  ssh_cipher_spec             => { default => "default", accept_regexp => qr/^$ssh_cipher_match(,$ssh_cipher_match)*$/ },
  transaction_log             => { default => undef,     accept => [ "no" ], accept_file => { absolute => 1 }, context => [ "global" ] },
  transaction_syslog          => { default => undef,     accept => [ "no", @syslog_facilities ], context => [ "global" ] },
  lockfile                    => { default => undef,     accept => [ "no" ], accept_file => { absolute => 1 }, context => [ "global" ] },

  rate_limit                  => { default => undef,     accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgtKMGT]?$/, require_bin => 'mbuffer' },
  rate_limit_remote           => { default => undef,     accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgtKMGT]?$/ },  # NOTE: requires 'mbuffer' command on remote hosts
  stream_buffer               => { default => undef,     accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgKMG%]?$/,  require_bin => 'mbuffer' },
  stream_buffer_remote        => { default => undef,     accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgKMG%]?$/  },  # NOTE: requires 'mbuffer' command on remote hosts
  stream_compress             => { default => undef,     accept => [ "no", (keys %compression) ] },
  stream_compress_level       => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  stream_compress_long        => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  stream_compress_threads     => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  stream_compress_adapt       => { default => undef,     accept => [ "yes", "no" ] },

  raw_target_compress         => { default => undef,     accept => [ "no", (keys %compression) ] },
  raw_target_compress_level   => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  raw_target_compress_long    => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  raw_target_compress_threads => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  raw_target_encrypt          => { default => undef,     accept => [ "no", "gpg", "openssl_enc" ] },
  raw_target_block_size       => { default => "128K",    accept_regexp => qr/^[0-9]+[kmgKMG]?$/ },
  raw_target_split            => { default => undef,     accept => [ "no" ], accept_regexp => qr/^[0-9]+([kmgtpezyKMGTPEZY][bB]?)?$/ },
  gpg_keyring                 => { default => undef,     accept_file => { absolute => 1 } },
  gpg_recipient               => { default => undef,     accept_regexp => qr/^[0-9a-zA-Z_@\+\-\.]+$/ },
  openssl_ciphername          => { default => "aes-256-cbc", accept_regexp => qr/^[0-9a-zA-Z\-]+$/ },
  openssl_iv_size             => { default => undef,     accept => [ "no" ], accept_numeric => 1 },
  openssl_keyfile             => { default => undef,     accept_file => { absolute => 1 } },

  kdf_backend                 => { default => undef,     accept_file => { absolute => 1 } },
  kdf_keysize                 => { default => "32",      accept_numeric => 1 },
  kdf_keygen                  => { default => "once",    accept => [ "once", "each" ] },

  group                       => { default => undef,     accept_regexp => qr/^$group_match(\s*[,\s]\s*$group_match)*$/, allow_multiple => 1, split => qr/\s*[,\s]\s*/ },
  noauto                      => { default => undef,     accept => [ "yes", "no" ] },

  backend                     => { default => "btrfs-progs", accept => [       "btrfs-progs", "btrfs-progs-btrbk", "btrfs-progs-sudo" ] },
  backend_local               => { default => undef,         accept => [ "no", "btrfs-progs", "btrfs-progs-btrbk", "btrfs-progs-sudo" ] },
  backend_remote              => { default => undef,         accept => [ "no", "btrfs-progs", "btrfs-progs-btrbk", "btrfs-progs-sudo" ] },
  backend_local_user          => { default => undef,         accept => [ "no", "btrfs-progs", "btrfs-progs-btrbk", "btrfs-progs-sudo" ] },

  compat                      => { default => undef,     accept => [ "no", "busybox" ] },
  compat_local                => { default => undef,     accept => [ "no", "busybox" ] },
  compat_remote               => { default => undef,     accept => [ "no", "busybox" ] },

  snapshot_qgroup_destroy     => { default => undef,     accept => [ "yes", "no" ], context => [ "global", "volume", "subvolume" ] },
  target_qgroup_destroy       => { default => undef,     accept => [ "yes", "no" ] },
  archive_qgroup_destroy      => { default => undef,     accept => [ "yes", "no" ], context => [ "global" ] },

  archive_exclude             => { default => undef,     accept_file => { wildcards => 1 }, allow_multiple => 1, context => [ "global" ] },
  archive_exclude_older       => { default => undef,     accept => [ "yes", "no" ] },

  cache_dir                   => { default => undef,     accept_file => { absolute => 1 }, allow_multiple => 1, context => [ "global" ] },
  ignore_extent_data_inline   => { default => "yes",     accept => [ "yes", "no" ] },

  warn_unknown_targets        => { default => undef,     accept => [ "yes", "no" ] },

  # deprecated options
  ssh_port                    => { default => "default", accept => [ "default" ], accept_numeric => 1,
                                   deprecated => { DEFAULT => { warn => 'Please use "ssh://hostname[:port]" notation in the "volume" and "target" configuration lines.' } } },
  # FIX: double-quoted so $BTRFS_PROGS_MIN is interpolated into the warning
  # (a single-quoted string would print the literal variable name).
  btrfs_progs_compat          => { default => undef, accept => [ "yes", "no" ],
                                   deprecated => { DEFAULT => { ABORT => 1, warn => "This feature has been dropped in btrbk-v0.23.0. Please update to newest btrfs-progs, AT LEAST >= $BTRFS_PROGS_MIN" } } },
  snapshot_preserve_daily     => { default => 'all', accept => [ "all" ], accept_numeric => 1, context => [ "global", "volume", "subvolume" ],
                                   deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  snapshot_preserve_weekly    => { default => 0, accept => [ "all" ], accept_numeric => 1, context => [ "global", "volume", "subvolume" ],
                                   deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  snapshot_preserve_monthly   => { default => 'all', accept => [ "all" ], accept_numeric => 1, context => [ "global", "volume", "subvolume" ],
                                   deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  target_preserve_daily       => { default => 'all', accept => [ "all" ], accept_numeric => 1,
                                   deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  target_preserve_weekly      => { default => 0, accept => [ "all" ], accept_numeric => 1,
                                   deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  target_preserve_monthly     => { default => 'all', accept => [ "all" ], accept_numeric => 1,
                                   deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  resume_missing              => { default => "yes", accept => [ "yes", "no" ],
                                   deprecated => { yes => { warn => 'ignoring (missing backups are always resumed since btrbk v0.23.0)' },
                                                   no  => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve_min latest" and "target_preserve no" if you want to keep only the latest backup', } } },
  snapshot_create_always      => { default => undef, accept => [ "yes", "no" ],
                                   deprecated => { yes => { warn => "Please use \"snapshot_create always\"",
                                                            replace_key   => "snapshot_create",
                                                            replace_value => "always",
                                                           },
                                                   no  => { warn => "Please use \"snapshot_create no\" or \"snapshot_create ondemand\"",
                                                            replace_key   => "snapshot_create",
                                                            replace_value => "ondemand",
                                                           }
                                                  },
                                 },
  receive_log                 => { default => undef, accept => [ "sidecar", "no" ], accept_file => { absolute => 1 },
                                   deprecated => { DEFAULT => { warn => "ignoring" } },
                                 }
 );

# Target types accepted in "target" configuration lines.
my @config_target_types = qw(send-receive raw);

# Column layouts for each printable table, keyed by output format
# ("table", "long", "raw", ...). A leading '-' appears to mark an
# optional column — NOTE(review): confirm exact semantics in
# print_formatted(). RALIGN lists right-aligned (numeric) columns.
my %table_formats = (
  config_volume => {
    table => [ qw( -volume_host -volume_port volume_path ) ],
    long  => [ qw(  volume_host -volume_port volume_path -volume_rsh ) ],
    raw   => [ qw( volume_url volume_host volume_port volume_path volume_rsh ) ],
    single_column => [ qw( volume_url ) ],
  },

  config_source => {
    table => [ qw( -source_host -source_port source_subvolume snapshot_path snapshot_name ) ],
    long  => [ qw(  source_host -source_port source_subvolume snapshot_path snapshot_name -source_rsh ) ],
    raw   => [ qw( source_url source_host source_port source_path snapshot_path snapshot_name source_rsh ) ],
    single_column => [ qw( source_url ) ],
  },

  config_target => {
    table => [ qw( -target_host -target_port target_path ) ],
    long  => [ qw(  target_host -target_port target_path -target_rsh ) ],
    raw   => [ qw( target_url target_host target_port target_path target_rsh ) ],
    single_column => [ qw( target_url ) ],
  },

  config => {
    table => [ qw( -source_host -source_port source_subvolume snapshot_path snapshot_name -target_host -target_port target_path ) ],
    long  => [ qw( -source_host -source_port source_subvolume snapshot_path snapshot_name -target_host -target_port target_path target_type snapshot_preserve target_preserve ) ],
    raw   => [ qw( source_url source_host source_port source_subvolume snapshot_path snapshot_name target_url target_host target_port target_path target_type snapshot_preserve target_preserve source_rsh target_rsh ) ],
  },

  resolved => {
    # NOTE: snapshot_path is ambigous and does NOT print SUBVOL_PATH here (should be snapshot_subvolume, left as-is for compatibility)
    table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port target_subvolume             ) ],
    long  => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port target_subvolume target_type ) ],
    raw   => [ qw( type source_url source_host source_port source_path snapshot_path snapshot_name status target_url target_host target_port target_path target_type source_rsh target_rsh ) ],
  },

  snapshots => {
    table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status ) ],
    long  => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status ) ],
    raw   => [ qw( source_url source_host source_port source_path snapshot_subvolume snapshot_name status source_rsh ) ],
    single_column => [ qw( snapshot_url ) ],
  },

  backups => { # same as resolved, except for single_column
    # NOTE: snapshot_path is ambigous and does NOT print SUBVOL_PATH here (should be snapshot_subvolume, left as-is for compatibility)
    table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port target_subvolume             ) ],
    long  => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port target_subvolume target_type ) ],
    raw   => [ qw( type source_url source_host source_port source_path snapshot_path snapshot_name status target_url target_host target_port target_path target_type source_rsh target_rsh ) ],
    single_column => [ qw( target_url ) ],
  },

  latest => { # same as resolved, except hiding target if not present
    # NOTE: snapshot_path is ambigous and does NOT print SUBVOL_PATH here (should be snapshot_subvolume, left as-is for compatibility)
    table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port -target_subvolume              ) ],
    long  => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port -target_subvolume -target_type ) ],
    raw   => [ qw( type source_url source_host source_port source_path snapshot_path snapshot_name status target_url target_host target_port target_path target_type source_rsh target_rsh ) ],
  },

  stats => {
    table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume  -target_host -target_port -target_subvolume snapshots  -backups ) ],
    long  => [ qw( -source_host -source_port source_subvolume snapshot_subvolume  -target_host -target_port -target_subvolume snapshot_status backup_status snapshots -backups -correlated -orphaned -incomplete ) ],
    raw   => [ qw( source_url source_host source_port source_subvolume snapshot_subvolume snapshot_name target_url target_host target_port target_subvolume snapshot_status backup_status snapshots backups correlated orphaned incomplete ) ],
    RALIGN => { snapshots=>1, backups=>1, correlated=>1, orphaned=>1, incomplete=>1 },
  },

  schedule => {
    table => [ qw( action -host -port subvolume scheme reason ) ],
    long  => [ qw( action -host -port root_path subvolume_path scheme reason ) ],
    raw   => [ qw( topic action url host port path hod dow min h d w m y) ],
  },

  usage => {
    table => [ qw(      -host -port path size used free ) ],
    long  => [ qw( type -host -port path size used device_size device_allocated device_unallocated device_missing device_used free free_min data_ratio metadata_ratio global_reserve global_reserve_used ) ],
    raw   => [ qw( type host  port path size used device_size device_allocated device_unallocated device_missing device_used free free_min data_ratio metadata_ratio global_reserve global_reserve_used ) ],
    RALIGN => { size=>1, used=>1, device_size=>1, device_allocated=>1, device_unallocated=>1, device_missing=>1, device_used=>1, free=>1, free_min=>1, data_ratio=>1, metadata_ratio=>1, global_reserve=>1, global_reserve_used=>1 },
  },

  transaction => {
    table  => [ qw(                type status          -target_host -target_port target_subvolume -source_host -source_port source_subvolume parent_subvolume ) ],
    long   => [ qw(      localtime type status duration  target_host -target_port target_subvolume  source_host -source_port source_subvolume parent_subvolume message ) ],
    tlog   => [ qw(      localtime type status          target_url source_url parent_url message ) ],
    syslog => [ qw(                type status          target_url source_url parent_url message ) ],
    raw    => [ qw( time localtime type status duration target_url source_url parent_url message ) ],
  },

  origin_tree => {
    table => [ qw( tree uuid parent_uuid received_uuid ) ],
    long  => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
    raw   => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
  },

  diff => {
    table => [ qw( flags count size file ) ],
    long  => [ qw( flags count size file ) ],
    raw   => [ qw( flags count size file ) ],
    RALIGN => { count=>1, size=>1 },
  },

  fs_list => {
    table => [ qw( -host mount_source mount_subvol mount_point id flags subvolume_path path ) ],
    short => [ qw( -host mount_source                          id flags                path ) ],
    long  => [ qw( -host mount_source id top cgen gen uuid parent_uuid received_uuid flags path ) ],
    raw   => [ qw(  host mount_source mount_subvol mount_point mount_subvolid id top_level cgen gen uuid parent_uuid received_uuid readonly path subvolume_path subvolume_rel_path url ) ],
    single_column => [ qw( url ) ],
  },

  extent_diff => {
    table => [ qw(             total exclusive -diff -set subvol ) ],
    long  => [ qw( id cgen gen total exclusive -diff -set subvol ) ],
    raw   => [ qw( id cgen gen total exclusive -diff -set subvol ) ],
    RALIGN => { total=>1, exclusive=>1, diff=>1, set=>1, cgen=>1, gen=>1 },
  },
);

# Command translation for the non-default backends: maps a canonical
# btrfs-progs command string to the argument vector actually executed.
# "btrfs-progs-btrbk" uses dedicated setuid-style wrapper binaries;
# "btrfs-progs-sudo" prefixes commands with non-interactive sudo (-n).
my %backend_cmd_map = (
  "btrfs-progs-btrbk" => { "btrfs subvolume list"     => [ "btrfs-subvolume-list"     ],
                           "btrfs subvolume show"     => [ "btrfs-subvolume-show"     ],
                           "btrfs subvolume snapshot" => [ "btrfs-subvolume-snapshot" ],
                           "btrfs subvolume delete"   => [ "btrfs-subvolume-delete"   ],
                           "btrfs send"               => [ "btrfs-send"               ],
                           "btrfs receive"            => [ "btrfs-receive"            ],
                           "btrfs filesystem usage"   => [ "btrfs-filesystem-usage"   ],
                           "btrfs qgroup destroy"     => [ "btrfs-qgroup-destroy"     ],
                         },
  "btrfs-progs-sudo" =>  { "btrfs subvolume list"     => [ "sudo", "-n", "btrfs", "subvolume", "list"     ],
                           "btrfs subvolume show"     => [ "sudo", "-n", "btrfs", "subvolume", "show"     ],
                           "btrfs subvolume snapshot" => [ "sudo", "-n", "btrfs", "subvolume", "snapshot" ],
                           "btrfs subvolume delete"   => [ "sudo", "-n", "btrfs", "subvolume", "delete"   ],
                           "btrfs send"               => [ "sudo", "-n", "btrfs", "send"                  ],
                           "btrfs receive"            => [ "sudo", "-n", "btrfs", "receive"               ],
                           "btrfs filesystem usage"   => [ "sudo", "-n", "btrfs", "filesystem", "usage"   ],
                           "btrfs qgroup destroy"     => [ "sudo", "-n", "btrfs", "qgroup", "destroy"     ],
                           "readlink"                 => [ "sudo", "-n", "readlink"                       ],
                         },
);

# keys used in raw target sidecar files (.info):
# (values define the output ordering of keys when writing .info files)
my %raw_info_sort = (
  TYPE                 => 1,
  FILE                 => 2,
  RECEIVED_UUID        => 3,
  RECEIVED_PARENT_UUID => 4,
  INCOMPLETE           => 5,
  # disabled for now, as its not very useful and might leak information
  #source_url           => 6,
  #parent_url           => 7,
  #target_url           => 8,
  compress             => 9,
  split                => 10,
  encrypt              => 11,
  cipher               => 12,
  iv                   => 13,
  # kdf_* (generated by kdf_backend)
 );

# Process-lifetime caches (populated lazily while resolving subvolumes):
my %raw_url_cache;   # map URL to (fake) btr_tree node
my %mountinfo_cache; # map MACHINE_ID to mount points (sorted descending by file length)
my %mount_source_cache; # map URL_PREFIX:mount_source (aka device) to btr_tree node
my %uuid_cache;      # map UUID to btr_tree node
my %realpath_cache;  # map URL to realpath (symlink target). empty string denotes an error.

my $tree_inject_id   = 0;  # fake subvolume id for injected nodes (negative)
my $fake_uuid_prefix = 'XXXXXXXX-XXXX-XXXX-XXXX-'; # plus 0-padded inject_id: XXXXXXXX-XXXX-XXXX-XXXX-000000000000

# Global runtime state, mostly set while parsing command line options.
my $program_name; # "btrbk" or "lsbtr", default to "btrbk"
my $dryrun;       # true for "-n/--dry-run" and the "dryrun" command
my $loglevel = 1; # 1=warn, 2=info, 3=debug, 4=trace (see TRACE/DEBUG/INFO/WARN)
my $quiet;
my @exclude_vf;   # filters from "--exclude"
my $do_dumper;    # enable Data::Dumper based debug dumps (VINFO/SUBVOL_LIST)
my $do_trace;
my $show_progress = 0;
my $output_format;   # from "--format" / "-t" / "-L"
my $output_pretty = 0;
my @output_unit;
my $lockfile;
my $tlog_fh;         # transaction log filehandle (if transaction_log is set)
my $syslog_enabled = 0;
my $current_transaction;
my @transaction_log; # accumulated transaction records (see action())
my %config_override; # from "--override=KEY=VALUE"
my @tm_now;  # current localtime ( sec, min, hour, mday, mon, year, wday, yday, isdst )
my @stderr;  # stderr of last run_cmd
my %warn_once;       # dedup state for INFO_ONCE/WARN_ONCE
my %kdf_vars;
my $kdf_session_key;


# Last-resort handler for unexpected exceptions: print a bug-report hint
# to STDERR, then re-die with a full stack trace via Carp::confess.
$SIG{__DIE__} = sub {
  print STDERR "\nERROR: process died unexpectedly (btrbk v$VERSION)"
             . "\nPlease contact the author: $AUTHOR\n\n"
             . "Stack Trace:\n----------------------------------------\n";
  Carp::confess @_;
};

# SIGINT handler: record the interrupt in the transaction log, dump the
# log collected so far to STDERR (so an interrupted run leaves a trace
# of what was actually done), then exit with status 1.
$SIG{INT} = sub {
  print STDERR "\nERROR: Caught SIGINT, dumping transaction log:\n";
  action("signal", status => "SIGINT");
  print_formatted("transaction", \@transaction_log, output_format => "tlog", outfile => *STDERR);
  exit 1;
};

# Print the version banner to STDERR (keeps STDOUT clean for
# machine-readable output). Called for "--version".
sub VERSION_MESSAGE
{
  print STDERR "$VERSION_INFO\n\n";
}

# Print usage information to STDERR, selecting the text by invocation
# name ("lsbtr" vs "btrbk"). Suppressed by --quiet.
# FIX: "--single-column" description now starts lowercase ("print"),
# consistent with every other option description.
sub HELP_MESSAGE
{
  return if($quiet);
#80-----------------------------------------------------------------------------
  if($program_name eq "lsbtr") {
    print STDERR <<"END_HELP_LSBTR";
usage: lsbtr [options] [path]...

options:
   -h, --help                display this help message
       --version             display version information
   -l, --long                use long listing format
   -u, --uuid                print uuid table (parent/received relations)
   -1, --single-column       print path column only
       --raw                 print raw table format
   -v, --verbose             increase output verbosity
   -c, --config=FILE         specify btrbk configuration file
       --override=KEY=VALUE  globally override a configuration option

For additional information, see $PROJECT_HOME
END_HELP_LSBTR
  }
  else {
    print STDERR <<"END_HELP_BTRBK";
usage: btrbk [options] <command> [filter...]

options:
   -h, --help                display this help message
       --version             display version information
   -c, --config=FILE         specify configuration file
   -n, --dry-run             perform a trial run with no changes made
       --exclude=FILTER      exclude configured sections
   -p, --preserve            preserve all (do not delete anything)
       --preserve-snapshots  preserve snapshots (do not delete snapshots)
       --preserve-backups    preserve backups (do not delete backups)
       --wipe                delete all but latest snapshots
   -v, --verbose             be more verbose (increase logging level)
   -q, --quiet               be quiet (do not print backup summary)
   -l, --loglevel=LEVEL      set logging level (error, warn, info, debug, trace)
   -t, --table               change output to table format
   -L, --long                change output to long format
       --format=FORMAT       change output format, FORMAT=table|long|raw
   -S, --print-schedule      print scheduler details (for the "run" command)
       --progress            show progress bar on send-receive operation
       --lockfile=FILE       create and check lockfile
       --override=KEY=VALUE  globally override a configuration option

commands:
   run                   run snapshot and backup operations
   dryrun                don't run btrfs commands; show what would be executed
   snapshot              run snapshot operations only
   resume                run backup operations, and delete snapshots
   prune                 only delete snapshots and backups
   archive <src> <dst>   recursively copy all subvolumes
   clean                 delete incomplete (garbled) backups
   stats                 print snapshot/backup statistics
   list <subcommand>     available subcommands are (default "all"):
      all                snapshots and backups
      snapshots          snapshots
      backups            backups and correlated snapshots
      latest             most recent snapshots and backups
      config             configured source/snapshot/target relations
      source             configured source/snapshot relations
      volume             configured volume sections
      target             configured targets
   usage                 print filesystem usage
   ls <path>             list all btrfs subvolumes below path
   origin <subvol>       print origin information for subvolume
   diff <from> <to>      list file changes between related subvolumes
   extents [diff] <path> calculate accurate disk space usage

For additional information, see $PROJECT_HOME
END_HELP_BTRBK
  }
#80-----------------------------------------------------------------------------
}

# Logging helpers. Output goes to STDERR; verbosity is controlled by the
# global $loglevel (higher = more verbose).
sub _log_cont {
  # Print a (possibly multi-line) message: first line gets $prefix,
  # continuation lines are marked with "$prefix... ".
  my $prefix = shift;
  my $body = join("\n${prefix}... ", @_);
  print STDERR "${prefix}${body}\n";
}
sub TRACE {
  return unless($loglevel >= 4);
  print STDERR map { "___ $_\n" } @_;
}
sub DEBUG { _log_cont("",          @_) if $loglevel >= 3 }
sub INFO  { _log_cont("",          @_) if $loglevel >= 2 }
sub WARN  { _log_cont("WARNING: ", @_) if $loglevel >= 1 }
sub ERROR { _log_cont("ERROR: ",   @_) }

sub INFO_ONCE {
  # Log an info message only once per unique message text $t; repeated
  # occurrences are only traced. Returns 1 if logged, 0 if suppressed.
  # FIX: trace label said "WARNING(again)" here (swapped with WARN_ONCE).
  my $t = shift;
  if($warn_once{INFO}{$t}) { TRACE("INFO(again): $t", @_) if($do_trace); return 0; }
  else { $warn_once{INFO}{$t} = 1; INFO($t, @_); return 1; }
}
sub WARN_ONCE {
  # Log a warning only once per unique message text $t; repeated
  # occurrences are only traced. Returns 1 if logged, 0 if suppressed.
  # FIX: trace label said "INFO(again)" here (swapped with INFO_ONCE).
  my $t = shift;
  if($warn_once{WARN}{$t}) { TRACE("WARNING(again): $t", @_) if($do_trace); return 0; }
  else { $warn_once{WARN}{$t} = 1; WARN($t, @_); return 1; }
}

sub VINFO {
  # Dump a vinfo structure to STDERR via Data::Dumper (no-op unless the
  # Dumper module was successfully loaded, see require_data_dumper()).
  return undef unless($do_dumper);
  my ($data, $label, $depth) = @_;
  $label ||= "vinfo";
  $depth //= 2;
  my $dumper = Data::Dumper->new([$data], [$label])->Maxdepth($depth);
  print STDERR $dumper->Dump();
}
sub SUBVOL_LIST {
  # Dump the subvolume list of $vol to STDERR, one "path<TAB>id" per line
  # (no-op unless the Dumper module was successfully loaded).
  return undef unless($do_dumper);
  my ($vol, $label) = @_;
  $label //= "SUBVOL_LIST";
  my @lines = map { "$vol->{PRINT}/./$_->{SUBVOL_PATH}\t$_->{node}{id}" } @{vinfo_subvol_list($vol)};
  print STDERR "$label:\n  " . join("\n  ", @lines) . "\n";
}


# Mark a config (or vinfo) section as aborted, and record an abort action
# in the transaction log (unless the key denotes a skip, not an action).
# Called as ABORTED($config, $text) or ABORTED($config, $key, $text).
sub ABORTED($$;$)
{
  my $config = shift;
  my $abrt_key = shift // die;
  my $abrt = shift;
  $config = $config->{CONFIG} if($config->{CONFIG});  # accept vinfo for $config
  unless(defined($abrt)) {
    # no key (only text) set: switch arguments, use default key
    $abrt = $abrt_key;
    $abrt_key = "abort_" . $config->{CONTEXT};
  }
  unless($abrt_key =~ /^skip_/) {
    # keys starting with "skip_" are not actions
    # flatten the message to a single line for the transaction log
    $abrt =~ s/\n/\\\\/g;
    $abrt =~ s/\r//g;
    action($abrt_key,
           status => "ABORT",
           vinfo_prefixed_keys("target", vinfo($config->{url}, $config)),
           message => $abrt,
          );
  }
  # IS_ABORTED() / ABORTED_TEXT() read this back
  $config->{ABORTED} = { key => $abrt_key, text => $abrt };
}

sub IS_ABORTED($;$)
{
  # Query the abort state of a config (or vinfo) section.
  # Without $filter_prefix: returns the abort key, or undef if not aborted.
  # With $filter_prefix: returns true iff the abort key starts with it.
  my ($config, $filter_prefix) = @_;
  $config = $config->{CONFIG} if($config->{CONFIG});  # accept vinfo for $config
  my $aborted = $config->{ABORTED} // return undef;
  my $key = $aborted->{key} // return undef;
  return ($key =~ /^$filter_prefix/) if($filter_prefix);
  return $key;
}

sub ABORTED_TEXT($)
{
  # Return the abort message text for a config (or vinfo) section,
  # or "" if the section is not aborted or has no message text.
  my $config = shift;
  $config = $config->{CONFIG} if($config->{CONFIG});  # accept vinfo for $config
  my $aborted = $config->{ABORTED};
  return "" unless(defined($aborted));
  return $aborted->{text} // "";
}

sub FIX_MANUALLY($$)
{
  # Record an error that requires manual intervention. Unlike ABORTED(),
  # this does not abort processing of the config section.
  my ($config, $msg) = @_;
  die unless(defined($msg));
  $config = $config->{CONFIG} if($config->{CONFIG});  # accept vinfo for $config
  push(@{ $config->{FIX_MANUALLY} //= [] }, $msg);
}

sub eval_quiet(&)
{
  # Run a code block inside eval with any global __DIE__ handler disabled,
  # so that a die inside the block is caught silently (returns undef).
  my $code = shift;
  local $SIG{__DIE__};
  return eval { $code->() };
}

sub require_data_dumper
{
  # Best-effort load of Data::Dumper (used for trace dumps only).
  # Sets the global $do_dumper flag on success; logs a warning (trace mode
  # only) on failure. Never fatal: data dumps are a debugging aid.
  if(eval_quiet { require Data::Dumper; }) {
    Data::Dumper->import("Dumper");
    $Data::Dumper::Sortkeys  = 1;   # deterministic dump output
    $Data::Dumper::Quotekeys = 0;
    $do_dumper = 1;
    # silence perl warning: Name "Data::Dumper::Sortkeys" used only once: possible typo at...
    TRACE "Successfully loaded Dumper module: sortkeys=$Data::Dumper::Sortkeys, quotekeys=$Data::Dumper::Quotekeys" if($do_trace);
  } else {
    WARN "Perl module \"Data::Dumper\" not found: data trace dumps disabled!" if($do_trace);
  }
}

sub init_transaction_log($$)
{
  # Open the transaction log file (append mode) and/or syslog, then replay
  # all actions collected so far (@transaction_log) as "DEFERRED" entries.
  # Both sinks are skipped in dryrun mode.
  my $file = shift;
  my $config_syslog_facility = shift;
  if(defined($file) && (not $dryrun)) {
    # FIX: three-argument open; the two-argument form (">> $file") allows
    # mode injection and whitespace mangling via the configured file name.
    if(open($tlog_fh, '>>', $file)) {
      # print headers (disabled)
      # print_formatted("transaction", [ ], output_format => "tlog", outfile => $tlog_fh);
      INFO "Using transaction log: $file";
    } else {
      $tlog_fh = undef;
      ERROR "Failed to open transaction log '$file': $!";
    }
  }
  if(defined($config_syslog_facility) && (not $dryrun)) {
    DEBUG "Opening syslog";
    # Sys::Syslog is loaded lazily; syslog stays disabled if unavailable
    if(eval_quiet { require Sys::Syslog; }) {
      $syslog_enabled = 1;
      Sys::Syslog::openlog("btrbk", "", $config_syslog_facility);
      DEBUG "Syslog enabled";
    }
    else {
      WARN "Syslog disabled: $@";
    }
  }
  # replay actions that were logged before initialization
  action("DEFERRED", %$_) foreach (@transaction_log);
}

sub close_transaction_log()
{
  # Close the transaction log file handle and syslog, if they were opened
  # by init_transaction_log().
  if($tlog_fh) {
    DEBUG "Closing transaction log";
    # low-precedence "or" with explicit parens: buffered write errors
    # surface at close time, so the result must be checked
    close($tlog_fh) or ERROR "Failed to close transaction log: $!";
  }
  if($syslog_enabled) {
    DEBUG "Closing syslog";
    eval_quiet { Sys::Syslog::closelog(); };
  }
}

sub action($@)
{
  # Record a transaction log entry of type $type with key/value details:
  # timestamp it, remember it in @transaction_log (for deferred replay),
  # and write it to the transaction log file and/or syslog if enabled.
  # Type "DEFERRED" marks a replay of an already-recorded entry and is
  # not re-added to @transaction_log. Returns the entry hashref.
  my $type = shift // die;
  my $h = { @_ };
  unless($type eq "DEFERRED") {
    my $time = $h->{time} // time;  # caller may provide a fixed timestamp
    $h->{type} = $type;
    $h->{time} = $time;
    $h->{localtime} = timestamp($time, 'debug-iso');
    push @transaction_log, $h;
  }
  print_formatted("transaction", [ $h ], output_format => "tlog", no_header => 1, outfile => $tlog_fh) if($tlog_fh);
  print_formatted("transaction", [ $h ], output_format => "syslog", no_header => 1) if($syslog_enabled);  # dirty hack, this calls syslog()
  return $h;
}

sub start_transaction($@)
{
  # Begin a transaction of type $type: log a "starting" (or
  # "dryrun_starting") action for each given action hash, and remember the
  # logged entries in $current_transaction for end_transaction().
  # Nested transactions are a programming error and die().
  my $type = shift // die;
  my $time = time;
  die("start_transaction() while transaction is running") if($current_transaction);
  my @actions = (ref($_[0]) eq "HASH") ? @_ : { @_ };  # single action is not hashref
  $current_transaction = [];
  foreach (@actions) {
    push @$current_transaction, action($type, %$_, status => ($dryrun ? "dryrun_starting" : "starting"), time => $time);
  }
}

sub end_transaction($$)
{
  # Finish the transaction started by start_transaction(): for every
  # pending entry, log a closing action with status "success" or "ERROR"
  # (prefixed "dryrun_" in dryrun mode) and the elapsed duration.
  # $success may be a scalar (applies to all entries) or a coderef that is
  # called per entry to decide its status.
  my $type = shift // die;
  my $success = shift; # scalar or coderef: if scalar, status is set for all current transitions
  my $time = time;
  die("end_transaction() while no transaction is running") unless($current_transaction);
  foreach (@$current_transaction) {
    die("end_transaction() has different type") unless($_->{type} eq $type);
    my $status = (ref($success) ? &{$success} ($_) : $success) ? "success" : "ERROR";
    $status = "dryrun_" . $status if($dryrun);
    action($type, %$_, status => $status, time => $time, duration => ($dryrun ? undef : ($time - $_->{time})));
  }
  $current_transaction = undef;
}

sub syslog($)
{
  # Send an informational line to syslog; no-op unless syslog was enabled
  # by init_transaction_log(). Errors from Sys::Syslog are swallowed.
  return undef unless($syslog_enabled);
  my $msg = shift;
  eval_quiet { Sys::Syslog::syslog("info", $msg); };
}

sub check_exe($)
{
  # Return 1 if an executable named $cmd is found in $ENV{PATH}, else 0.
  my $cmd = shift // die;
  my @hits = grep { -x "$_/$cmd" } split(":", $ENV{PATH});
  return @hits ? 1 : 0;
}

sub stream_buffer_cmd_text($)
{
  # Assemble an mbuffer(1) command implementing rate limiting, stream
  # buffering and/or a progress bar. Returns the empty list if none of
  # these options is set, else a hashref: { cmd_text => "mbuffer ..." }.
  my $opts = shift;
  my $rate_in   = $opts->{rate_limit_in} // $opts->{rate_limit}; # maximum read rate: b,k,M,G
  my $rate_out  = $opts->{rate_limit_out}; # maximum write rate: b,k,M,G
  my $bufsize   = $opts->{stream_buffer};  # b,k,M,G,% (default: 2%)
  my $blocksize = $opts->{blocksize};      # defaults to 10k
  my $progress  = $opts->{show_progress};

  # mbuffer not needed at all
  return () unless($rate_in || $rate_out || $bufsize || $progress);

  # NOTE: mbuffer takes defaults from /etc/mbuffer.rc
  # -v 1: disable warnings (they arrive asynchronously and cant be caught)
  my @cmd = ( "mbuffer", "-v", "1" );
  push @cmd, "-q" unless($progress);
  push @cmd, ( "-s", $blocksize )    if($blocksize);
  push @cmd, ( "-m", lc($bufsize) )  if($bufsize);
  push @cmd, ( "-r", lc($rate_in) )  if($rate_in);
  push @cmd, ( "-R", lc($rate_out) ) if($rate_out);
  return { cmd_text => join(' ', @cmd) };
}

sub compress_cmd_text($;$)
{
  # Build the compression (or, if $decompress is set, decompression)
  # command line for a stream compression definition $def
  # (keys: key, level, threads, long, adapt), honoring the capabilities
  # declared in %compression. Returns a hashref: { cmd_text => "..." }.
  my $def = shift // die;
  my $decompress = shift;
  my $cc = $compression{$def->{key}};
  my @cmd = @{ $decompress ? $cc->{decompress_cmd} : $cc->{compress_cmd} };

  # compression level (compress only), clamped to the supported range
  if((not $decompress) && defined($def->{level}) && ($def->{level} ne "default")) {
    my $level = $def->{level};
    if($level < $cc->{level_min}) {
      WARN_ONCE "Compression level capped to minimum for '$cc->{name}': $cc->{level_min}";
      $level = $cc->{level_min};
    }
    if($level > $cc->{level_max}) {
      WARN_ONCE "Compression level capped to maximum for '$cc->{name}': $cc->{level_max}";
      $level = $cc->{level_max};
    }
    push @cmd, "-$level";
  }

  # number of threads, if supported by the algorithm
  if(defined($def->{threads}) && ($def->{threads} ne "default")) {
    if(my $thread_opt = $cc->{threads}) {
      push @cmd, $thread_opt . $def->{threads};
    }
    else {
      WARN_ONCE "Threading is not supported for '$cc->{name}', ignoring";
    }
  }

  # long distance matching window, if supported
  if(defined($def->{long}) && ($def->{long} ne "default")) {
    if(my $long_opt = $cc->{long}) {
      push @cmd, $long_opt . $def->{long};
    }
    else {
      WARN_ONCE "Long distance matching is not supported for '$cc->{name}', ignoring";
    }
  }

  # adaptive compression mode, if supported
  if(defined($def->{adapt})) {
    if(my $adapt_opt = $cc->{adapt}) {
      push @cmd, $adapt_opt;
    }
    else {
      WARN_ONCE "Adaptive compression is not supported for '$cc->{name}', ignoring";
    }
  }

  return { cmd_text => join(' ', @cmd) };
}

sub decompress_cmd_text($)
{
  # Convenience wrapper: compress_cmd_text() in decompress mode.
  my $def = shift;
  return compress_cmd_text($def, 1);
}

sub _piped_cmd_txt($)
{
  # Join the {cmd_text} members of @$cmd_pipe into a shell pipeline
  # ("a | b | c"). An entry starting with ">" (output redirection) must
  # not be the first element, must be the last, and is appended with a
  # plain space ("a > file") instead of a pipe.
  my $cmd_pipe = shift;
  my @texts = map { $_->{cmd_text} } @$cmd_pipe;
  my $result = "";
  my $seen_redirect;
  my $idx = 0;
  foreach my $text (@texts) {
    die if($seen_redirect);       # nothing may follow a redirection
    if($text =~ /^>/) {
      die unless($idx > 0);       # redirection cannot come first
      $seen_redirect = 1;
      $result .= ' ' . $text;
    }
    elsif($idx == 0) {
      $result = $text;
    }
    else {
      $result .= ' | ' . $text;
    }
    $idx++;
  }
  return $result;
}

sub _safe_cmd($$)
{
  # Flatten a command argument list for shell execution.
  # Elements of the form { unsafe => "string" } are replaced IN-PLACE by
  # their string value; any such string failing check_file() (all files
  # must be absolute paths) is recorded in @$offending.
  # Returns the space-joined command string.
  my $aref = shift;
  my $offending = shift;
  foreach my $arg (@$aref) {
    next unless(ref($arg) eq 'HASH');
    $arg = $arg->{unsafe};  # $arg aliases the array element: modifies @$aref
    # NOTE: all files must be absolute
    push(@$offending, "\"$arg\"") unless(defined(check_file($arg, { absolute => 1 })));
  }
  return join(' ', @$aref);
}

sub run_cmd(@)
{
  # IPC::Open3 based implementation.
  # NOTE: multiple filters are not supported!
  #
  # Assemble a (possibly remote) shell pipeline from a list of command
  # hashes, execute it, and return the stdout lines as an arrayref
  # (undef on error; stderr lines are collected in the global @stderr).
  # Recognized keys per command hash include: cmd, rsh, check_unsafe,
  # redirect_to_file, compress_stdin, compressed_ok, non_destructive,
  # filter_stderr, fatal_stderr, large_output, stream_options.

  my @cmd_pipe_in = (ref($_[0]) eq "HASH") ? @_ : { @_ };
  die unless(scalar(@cmd_pipe_in));
  @stderr = ();

  # accumulators filled while walking the input pipe
  my $destructive = 0;
  my @cmd_pipe;
  my @unsafe_cmd;
  my $compressed = undef;   # compression definition currently applied to the stream
  my $large_output;
  my $stream_options = $cmd_pipe_in[0]->{stream_options} // {};
  my @filter_stderr;
  my $fatal_stderr;
  my $has_rsh;

  # first command produces the stream, last one consumes it
  $cmd_pipe_in[0]->{stream_source} = 1;
  $cmd_pipe_in[-1]->{stream_sink}  = 1;

  foreach my $href (@cmd_pipe_in)
  {
    die if(defined($href->{cmd_text}));

    push @filter_stderr, ((ref($href->{filter_stderr}) eq "ARRAY") ? @{$href->{filter_stderr}} : $href->{filter_stderr}) if($href->{filter_stderr});
    $fatal_stderr = $href->{fatal_stderr} if($href->{fatal_stderr});
    $destructive = 1 unless($href->{non_destructive});
    $has_rsh = 1 if($href->{rsh});
    $large_output = 1 if($href->{large_output});

    # validate extra unsafe strings not part of the command itself
    if($href->{check_unsafe}) {
      _safe_cmd($href->{check_unsafe}, \@unsafe_cmd);
    }

    if($href->{redirect_to_file}) {
      die unless($href->{stream_sink});
      $href->{cmd_text} = _safe_cmd([ '>', $href->{redirect_to_file} ], \@unsafe_cmd);
    }
    elsif($href->{compress_stdin}) {
      # does nothing if already compressed correctly by stream_compress
      if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$href->{compress_stdin}->{key}}->{format})) {
        # re-compress with different algorithm
        push @cmd_pipe, decompress_cmd_text($compressed);
        $compressed = undef;
      }
      unless($compressed) {
        push @cmd_pipe, compress_cmd_text($href->{compress_stdin});
        $compressed = $href->{compress_stdin};
      }

      next;
    }
    elsif($href->{cmd}) {
      $href->{cmd_text} = _safe_cmd($href->{cmd}, \@unsafe_cmd);
    }
    return undef unless(defined($href->{cmd_text}));

    my @rsh_compress_in;
    my @rsh_compress_out;
    my @decompress_in;

    # input stream compression: local, in front of rsh_cmd_pipe
    if($href->{rsh} && $stream_options->{stream_compress} && (not $href->{stream_source})) {
      if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$stream_options->{stream_compress}->{key}}->{format})) {
        # re-compress with different algorithm, should be avoided!
        push @rsh_compress_in, decompress_cmd_text($compressed);
        $compressed = undef;
      }
      if(not $compressed) {
        $compressed = $stream_options->{stream_compress};
        push @rsh_compress_in, compress_cmd_text($compressed);
      }
    }

    # decompress before commands that cannot consume a compressed stream
    if($compressed && (not ($href->{compressed_ok}))) {
      push @decompress_in, decompress_cmd_text($compressed);
      $compressed = undef;
    }

    # output stream compression: remote, at end of rsh_cmd_pipe
    if($href->{rsh} && $stream_options->{stream_compress} && (not $href->{stream_sink}) && (not $compressed)) {
      $compressed = $stream_options->{stream_compress};
      push @rsh_compress_out, compress_cmd_text($compressed);
    }

    if($href->{rsh}) {
      # honor stream_buffer_remote, rate_limit_remote for stream source / sink
      my @rsh_stream_buffer_in  = $href->{stream_sink}   ? stream_buffer_cmd_text($stream_options->{rsh_sink}) : ();
      my @rsh_stream_buffer_out = $href->{stream_source} ? stream_buffer_cmd_text($stream_options->{rsh_source}) : ();

      # the sub-pipeline executed on the remote side, wrapped in quotes below
      my @rsh_cmd_pipe = (
        @decompress_in,
        @rsh_stream_buffer_in,
        $href,
        @rsh_stream_buffer_out,
        @rsh_compress_out,
       );
      @decompress_in = ();

      # fixup redirect_to_file
      if((scalar(@rsh_cmd_pipe) == 1) && ($rsh_cmd_pipe[0]->{redirect_to_file})) {
        # NOTE: direct redirection in ssh command does not work: "ssh '> outfile'"
        # we need to assemble: "ssh 'cat > outfile'"
        unshift @rsh_cmd_pipe, { cmd_text => 'cat' };
      }

      my $rsh_text = _safe_cmd($href->{rsh}, \@unsafe_cmd);
      return undef unless(defined($rsh_text));
      $href->{cmd_text} = $rsh_text . " '" . _piped_cmd_txt(\@rsh_cmd_pipe) . "'";
    }

    # local stream_buffer, rate_limit and show_progress in front of stream sink
    my @stream_buffer_in = $href->{stream_sink} ? stream_buffer_cmd_text($stream_options->{local_sink}) : ();

    push @cmd_pipe, (
      @decompress_in,   # empty if rsh
      @stream_buffer_in,
      @rsh_compress_in, # empty if not rsh
      $href,  # command or rsh_cmd_pipe
     );
  }

  my $cmd = _piped_cmd_txt(\@cmd_pipe);

  # refuse to run if any unsafe string failed validation
  if(scalar(@unsafe_cmd)) {
    ERROR "Unsafe command `$cmd` (offending string: " . join(', ', @unsafe_cmd) . ')';
    return undef;
  }

  # destructive commands are only printed in dryrun mode
  if($dryrun && $destructive) {
    DEBUG "### (dryrun) $cmd";
    return [];
  }
  DEBUG "### $cmd";


  # execute command
  my ($pid, $out_fh, $err_fh, @stdout);
  $err_fh = gensym;   # open3 needs a pre-created glob for stderr
  if(eval_quiet { $pid = open3(undef, $out_fh, $err_fh, $cmd); }) {
    chomp(@stdout = readline($out_fh));
    chomp(@stderr = readline($err_fh));
    waitpid($pid, 0);
    if($do_trace) {
      if($large_output) {
        TRACE "Command output lines=" . scalar(@stdout) . " (large_output, not dumped)";
      } else {
        TRACE map("[stdout] $_", @stdout);
      }
      TRACE map("[stderr] $_", @stderr);
    }
  }
  else {
    ERROR "Command execution failed ($!): `$cmd`";
    return undef;
  }

  # fatal errors (see perlvar on $? semantics after waitpid)
  if($? == -1) {
    ERROR "Command execution failed ($!): `$cmd`";
    return undef;
  }
  elsif ($? & 127) {
    my $signal = $? & 127;
    ERROR "Command execution failed (child died with signal $signal): `$cmd`";
    return undef;
  }
  my $exitcode = $? >> 8;

  # call hooks: fatal_stderr, filter_stderr
  if(($exitcode == 0) && $fatal_stderr) {
    $exitcode = -1 if(grep &{$fatal_stderr}(), @stderr);
  }
  foreach my $filter_fn (@filter_stderr) {
    @stderr = map { &{$filter_fn} ($exitcode); $_ // () } @stderr;
  }

  if($exitcode) {
    unshift @stderr, "sh: $cmd";
    if($has_rsh && ($exitcode == 255)) {
      # SSH returns exit status 255 if an error occurred (including
      # network errors, dns failures).
      unshift @stderr, "SSH command failed (exitcode=$exitcode)";
    } else {
      unshift @stderr, "Command execution failed (exitcode=$exitcode)";
    }
    DEBUG @stderr;
    return undef;
  }
  else {
    DEBUG "Command execution successful";
  }
  return \@stdout;
}


sub _btrfs_filter_stderr
{
  # stderr filter hook for btrfs commands; operates on $_.
  # Usage/option errors indicate btrfs-progs older than we support.
  WARN_ONCE "Using unsupported btrfs-progs < v$BTRFS_PROGS_MIN"
    if(/^usage: / || /(unrecognized|invalid) option/);
  # strip error prefix (we print our own)
  # note that this also affects ssh_filter_btrbk.sh error strings
  s/^ERROR: //;
}


sub btrfs_filesystem_show($)
{
  # Run "btrfs filesystem show <path>" (read-only) on the host of $vol.
  # Returns the raw output lines from run_cmd(), or undef on error.
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my @run_args = ( cmd => vinfo_cmd($vol, "btrfs filesystem show", { unsafe => $path }),
                   rsh => vinfo_rsh($vol),
                   non_destructive => 1,
                   filter_stderr => \&_btrfs_filter_stderr );
  return run_cmd(@run_args);
}


sub btrfs_filesystem_df($)
{
  # Run "btrfs filesystem df <path>" (read-only) on the host of $vol.
  # Returns the raw output lines from run_cmd(), or undef on error.
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my @run_args = ( cmd => vinfo_cmd($vol, "btrfs filesystem df", { unsafe => $path }),
                   rsh => vinfo_rsh($vol),
                   non_destructive => 1,
                   filter_stderr => \&_btrfs_filter_stderr );
  return run_cmd(@run_args);
}


sub btrfs_filesystem_usage($)
{
  # Run "btrfs filesystem usage <path>" and parse the "Overall:" section
  # into a hashref of detail items (device_size, device_used, free, ...).
  # Returns undef on error.
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $ret = run_cmd( cmd => vinfo_cmd($vol, "btrfs filesystem usage", { unsafe => $path } ),
                     rsh => vinfo_rsh($vol),
                     non_destructive => 1,
                     filter_stderr => \&_btrfs_filter_stderr,
                    );
  unless(defined($ret)) {
    ERROR "Failed to fetch btrfs filesystem usage for: $vol->{PRINT}", @stderr;
    return undef;
  }

  my %detail;
  foreach(@$ret) {
    if(/^\s+Device size:\s+(\S+)/) {
      $detail{device_size} = $1;
    }
    elsif(/^\s+Device allocated:\s+(\S+)/) {
      $detail{device_allocated} = $1;
    }
    elsif(/^\s+Device unallocated:\s+(\S+)/) {
      $detail{device_unallocated} = $1;
    }
    elsif(/^\s+Device missing:\s+(\S+)/) {
      $detail{device_missing} = $1;
    }
    elsif(/^\s+Used:\s+(\S+)/) {
      # NOTE: a second "Used:" branch was removed here: it was unreachable,
      # as this branch always matches first within the elsif chain.
      $detail{device_used} = $1;
    }
    elsif(/^\s+Free \(estimated\):\s+(\S+)\s+\(min: (\S+)\)/) {
      $detail{free} = $1;
      $detail{free_min} = $2;
    }
    elsif(/^\s+Data ratio:\s+(\S+)/) {
      $detail{data_ratio} = $1;
    }
    elsif(/^\s+Metadata ratio:\s+(\S+)/) {
      $detail{metadata_ratio} = $1;
    }
    elsif(/^\s+Global reserve:\s+(\S+)\s+\(used: (\S+)\)/) {
      $detail{global_reserve} = $1;
      $detail{global_reserve_used} = $2;
    }
    else {
      TRACE "Failed to parse filesystem usage line \"$_\" for: $vol->{PRINT}" if($do_trace);
    }
  }
  DEBUG "Parsed " . scalar(keys %detail) . " filesystem usage detail items: $vol->{PRINT}";

  # calculate aggregate size / usage
  # guard against missing keys: "use warnings FATAL" would otherwise make a
  # match on an undef value (unparsable output) a fatal runtime error
  if(defined($detail{data_ratio}) && ($detail{data_ratio} =~ /^[0-9]+\.[0-9]+$/)) {
    if(defined($detail{device_size}) && ($detail{device_size} =~ /^([0-9]+\.[0-9]+)(.*)/)) {
      $detail{size} = sprintf('%.2f%s', $1 / $detail{data_ratio}, $2);
    }
    if(defined($detail{device_used}) && ($detail{device_used} =~ /^([0-9]+\.[0-9]+)(.*)/)) {
      $detail{used} = sprintf('%.2f%s', $1 / $detail{data_ratio}, $2);
    }
  }

  TRACE(Data::Dumper->Dump([\%detail], ["btrfs_filesystem_usage($vol->{URL})"])) if($do_trace && $do_dumper);
  return \%detail;
}


# Run "btrfs subvolume show <path>" and parse/validate the output.
# returns hashref with keys: (uuid parent_uuid id gen cgen top_level)
# for btrfs root, returns at least: (id is_root)
# for btrfs-progs >= 4.1, also returns key: "received_uuid"
# if present, also returns (unvalidated) keys: (name creation_time flags)
# Options: rootid => N looks up a subvolume by id (btrfs-progs >= 4.12).
# Returns undef on command failure or unparsable/invalid output.
sub btrfs_subvolume_show($;@)
{
  my $vol = shift || die;
  my %opts = @_;
  my @cmd_options;
  push(@cmd_options, '--rootid=' . $opts{rootid}) if($opts{rootid});  # btrfs-progs >= 4.12
  my $path = $vol->{PATH} // die;
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume show", @cmd_options, { unsafe => $path }),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    filter_stderr => \&_btrfs_filter_stderr,
                   );

  return undef unless(defined($ret));

  unless(scalar(@$ret)) {
    ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
    return undef;
  }

  # NOTE: the first line starts with a path:
  #  - btrfs-progs <  4.12 prints the full (absolute, resolved) path
  #  - btrfs-progs >= 4.12 prints the relative path to btrfs root (or "/" if it is the root)

  my %detail;
  if($ret->[0] =~ / is (btrfs root|toplevel subvolume)$/) {
    # btrfs-progs <  4.4 prints: "<subvol> is btrfs root"
    # btrfs-progs >= 4.4 prints: "<subvol> is toplevel subvolume"
    # btrfs-progs >= 4.8.3 does not enter here, as output shares format with regular subvolumes
    $detail{id} = 5;
  }
  else {
    # translation table: output labels (varying by btrfs-progs version) -> detail keys
    my %trans = (
      "Name"                  => "name",
      "uuid"                  => "uuid",
      "UUID"                  => "uuid",            # btrfs-progs >= 4.1
      "Parent uuid"           => "parent_uuid",
      "Parent UUID"           => "parent_uuid",     # btrfs-progs >= 4.1
      "Received UUID"         => "received_uuid",   # btrfs-progs >= 4.1
      "Creation time"         => "creation_time",
      "Object ID"             => "id",
      "Subvolume ID"          => "id",              # btrfs-progs >= 4.1
      "Generation (Gen)"      => "gen",
      "Generation"            => "gen",             # btrfs-progs >= 4.1
      "Gen at creation"       => "cgen",
      "Parent"                => "parent_id",
      "Parent ID"             => "parent_id",       # btrfs-progs >= 4.1
      "Top Level"             => "top_level",
      "Top level ID"          => "top_level",       # btrfs-progs >= 4.1
      "Flags"                 => "flags",
     );
    foreach(@$ret) {
      next unless /^\s+(.+):\s+(.*)$/;
      my ($key, $value) = ($1, $2);
      if($trans{$key}) {
        $detail{$trans{$key}} = $value;
      } else {
        DEBUG "Ignoring subvolume detail \"$key: $value\" for: $vol->{PRINT}";
      }
    }
    DEBUG "Parsed " . scalar(keys %detail) . " subvolume detail items: $vol->{PRINT}";

    # NOTE: as of btrfs-progs v4.6.1, flags are either "-" or "readonly"
    $detail{readonly} = ($detail{flags} =~ /readonly/) ? 1 : 0 if($detail{flags});

    # validate required keys
    unless((defined($detail{parent_uuid}) && (($detail{parent_uuid} eq '-') || ($detail{parent_uuid} =~ /^$uuid_match$/))) &&
           (defined($detail{id}) && ($detail{id} =~ /^\d+$/) && ($detail{id} >= 5)) &&
           (defined($detail{gen}) && ($detail{gen} =~ /^\d+$/)) &&
           (defined($detail{cgen}) && ($detail{cgen} =~ /^\d+$/)) &&
           (defined($detail{top_level}) && ($detail{top_level} =~ /^\d+$/)) &&
           (defined($detail{readonly})))
    {
      ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
      return undef;
    }

    # NOTE: filesystems created with btrfs-progs < 4.16 have no UUID for subvolid=5,
    # assert {uuid} is either valid or undef
    if(defined($detail{uuid}) && ($detail{uuid} !~ /^$uuid_match$/)) {
      if($detail{id} == 5) {
        DEBUG "No UUID on btrfs root (id=5): $vol->{PRINT}";
      } else {
        ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
        return undef;
      }
      delete $detail{uuid};
    }

    # NOTE: received_uuid is not required here, as btrfs-progs < 4.1 does not give us that information.
    #       no worries, we get this from btrfs_subvolume_list() for all subvols.
    if(defined($detail{received_uuid}) && ($detail{received_uuid} ne '-') && ($detail{received_uuid} !~ /^$uuid_match$/)) {
      ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
      return undef;
    }

    VINFO(\%detail, "detail") if($loglevel >=4);
  }

  # when looking up by rootid, the result must match the requested id
  if($opts{rootid} && ($detail{id} != $opts{rootid})) {
    ERROR "Failed to parse subvolume detail (rootid mismatch) for: $vol->{PRINT}";
    return undef;
  }

  if($detail{id} == 5) {
    DEBUG "Found btrfs root: $vol->{PRINT}";
    $detail{is_root} = 1;
  }

  return \%detail;
}


sub btrfs_subvolume_list_readonly_flag($)
{
  # Run "btrfs subvolume list -a -r <path>" and return a hashref mapping
  # subvolume id -> 1 for every readonly subvolume of the filesystem at
  # $vol. Returns undef on command or parse failure.
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;

  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume list", '-a', '-r', { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    filter_stderr => \&_btrfs_filter_stderr,
                   );
  return undef unless(defined($ret));

  my %readonly;
  foreach my $line (@$ret) {
    if($line =~ /^ID\s+([0-9]+)\s+gen\s+[0-9]+\s+top level\s+[0-9]+\s+path\s/) {
      $readonly{$1} = 1;
    }
    else {
      ERROR "Failed to parse subvolume list (unsupported btrfs-progs) for: $vol->{PRINT}";
      DEBUG "Offending line: $line";
      return undef;
    }
  }
  DEBUG "Parsed " . scalar(keys %readonly) . " readonly subvolumes for filesystem at: $vol->{PRINT}";
  return \%readonly;
}


# Run "btrfs subvolume list" on the filesystem at $vol and return an
# arrayref of node hashes (id, gen, cgen, top_level, parent_uuid,
# received_uuid, uuid, path). Option: subvol_only => 1 lists only
# subvolumes below the given path (-o). Returns undef on error.
# NOTE: btrfs root (id=5) is NOT included in the result, see
# btrfs_subvolume_list_complete().
sub btrfs_subvolume_list($;@)
{
  my $vol = shift || die;
  my %opts = @_;
  my $path = $vol->{PATH} // die;
  my @filter_options = ('-a');
  push(@filter_options, '-o') if($opts{subvol_only});

  # NOTE: btrfs-progs <= 3.17 do NOT support the '-R' flag.
  # NOTE: Support for btrfs-progs <= 3.17 has been dropped in
  #       btrbk-0.23, the received_uuid flag very essential!
  my @display_options = ('-c', '-u', '-q', '-R');
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume list", @filter_options, @display_options, { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    filter_stderr => \&_btrfs_filter_stderr,
                   );
  return undef unless(defined($ret));

  my @nodes;
  foreach(@$ret)
  {
    my %node;
    # NOTE: btrfs-progs >= 4.13.2 pads uuid's with 36 whitespaces
    unless(/^ID            \s+ ([0-9]+)     \s+
            gen            \s+ ([0-9]+)     \s+
            cgen           \s+ ([0-9]+)     \s+
            top\ level     \s+ ([0-9]+)     \s+
            parent_uuid    \s+ ([0-9a-f-]+) \s+
            received_uuid  \s+ ([0-9a-f-]+) \s+
            uuid           \s+ ([0-9a-f-]+) \s+
            path           \s+ (.+)         $/x) {
      ERROR "Failed to parse subvolume list (unsupported btrfs-progs) for: $vol->{PRINT}";
      DEBUG "Offending line: $_";
      return undef;
    }
    %node = (
      id            => $1,
      gen           => $2,
      cgen          => $3,
      top_level     => $4,
      parent_uuid   => $5, # note: parent_uuid="-" if no parent
      received_uuid => $6,
      uuid          => $7,
      path          => $8  # btrfs path, NOT filesystem path
     );

    # NOTE: "btrfs subvolume list <path>" prints <FS_TREE> prefix only if
    # the subvolume is reachable within <path>. (as of btrfs-progs-3.18.2)
    #
    # NOTE: Be prepared for this to change in btrfs-progs!
    $node{path} =~ s/^<FS_TREE>\///;     # remove "<FS_TREE>/" portion from "path".

    push @nodes, \%node;
  }
  DEBUG "Parsed " . scalar(@nodes) . " total subvolumes for filesystem at: $vol->{PRINT}";

  return \@nodes;
}


# Fetch the complete subvolume list for the filesystem at $vol: the plain
# subvolume list, enriched with the "readonly" flag, plus the btrfs root
# (id=5) prepended. Returns an arrayref of node hashes, undef on error.
sub btrfs_subvolume_list_complete($)
{
  my $vol = shift || die;

  # fetch subvolume list
  my $nodes = btrfs_subvolume_list($vol);
  return undef unless($nodes);

  # fetch readonly flag
  # NOTE: the only way to get "readonly" flag is via a second call to "btrfs subvol list" with the "-r" option (as of btrfs-progs v4.3.1)
  my $ro = btrfs_subvolume_list_readonly_flag($vol);
  return undef unless(defined($ro));
  foreach (@$nodes) {
    $_->{readonly} = $ro->{$_->{id}} // 0;
  }

  # btrfs root (id=5) is not provided by btrfs_subvolume_list above, read it separately (best-effort)
  my $tree_root = btrfs_subvolume_show($vol, rootid => 5);
  unless($tree_root) {
    # this is not an error:
    #  - btrfs-progs < 4.12 does not support rootid lookup
    #  - UUID can be missing if filesystem was created with btrfs-progs < 4.16
    DEBUG "Failed to fetch subvolume detail (old btrfs-progs?) for btrfs root (id=5) on: $vol->{PRINT}";
    $tree_root = { id => 5, is_root => 1 };
  }
  unshift(@$nodes, $tree_root);

  return $nodes;
}


# Run "btrfs subvolume find-new <path> <lastgen>" and aggregate the list
# of files modified since generation $lastgen. Returns a hashref:
#   { files => { <name> => { len, new, gen => {...}, flags => {...} } },
#     transid_marker => <gen>,
#     parse_errors   => <count> }
# or undef if the command itself failed.
sub btrfs_subvolume_find_new($$;$)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $lastgen = shift // die;
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume find-new", { unsafe => $path }, $lastgen ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    filter_stderr => \&_btrfs_filter_stderr,
                    large_output => 1,
                   );
  unless(defined($ret)) {
    ERROR "Failed to fetch modified files for: $vol->{PRINT}", @stderr;
    return undef;
  }

  my %files;
  my $parse_errors = 0;
  my $transid_marker;
  foreach(@$ret)
  {
    # one "inode ..." line per extent; aggregate per file name
    if(/^inode \S+ file offset (\S+) len (\S+) disk start \S+ offset \S+ gen (\S+) flags (\S+) (.+)$/) {
      my $file_offset = $1;
      my $len         = $2;
      my $gen         = $3;
      my $flags       = $4;
      my $name        = $5;
      $files{$name}->{len} += $len;
      $files{$name}->{new} = 1 if($file_offset == 0);  # extent at offset 0: treat as new file
      $files{$name}->{gen}->{$gen} = 1;  # count the generations
      if($flags ne "NONE") {
        $files{$name}->{flags}{$_} = 1 foreach split(/\|/, $flags);
      }
    }
    elsif(/^transid marker was (\S+)$/) {
      $transid_marker = $1;
    }
    else {
      ERROR "Failed to parse output from `btrfs subvolume find-new`:", $_;
      $parse_errors++;
    }
  }
  ERROR "Failed to parse $parse_errors lines from `btrfs subvolume find-new`" if($parse_errors);

  return { files => \%files,
           transid_marker => $transid_marker,
           parse_errors => $parse_errors,
          };
}


# Create a readonly btrfs snapshot of $svol at $target_vol, wrapped in a
# "snapshot" transaction (see start_transaction / end_transaction).
# returns $target, or undef on error
sub btrfs_subvolume_snapshot($$)
{
  my $svol = shift || die;
  my $target_vol = shift // die;
  my $target_path = $target_vol->{PATH} // die;
  my $src_path = $svol->{PATH} // die;
  INFO "[snapshot] source: $svol->{PRINT}";
  INFO "[snapshot] target: $target_vol->{PRINT}";
  start_transaction("snapshot",
                    vinfo_prefixed_keys("target", $target_vol),
                    vinfo_prefixed_keys("source", $svol),
                   );
  # -r: always create readonly snapshots (required for send/receive)
  my $ret = run_cmd(cmd => vinfo_cmd($svol, "btrfs subvolume snapshot", '-r', { unsafe => $src_path }, { unsafe => $target_path } ),
                    rsh => vinfo_rsh($svol),
                    filter_stderr => \&_btrfs_filter_stderr,
                   );
  end_transaction("snapshot", defined($ret));
  unless(defined($ret)) {
    ERROR "Failed to create snapshot: $svol->{PRINT} -> $target_path", @stderr;
    return undef;
  }
  return $target_vol;
}


# Delete one or more subvolumes (or raw backup files) using a single
# command invocation, wrapped in a "delete" transaction.
#
# $targets: a single vinfo hash, or an arrayref of vinfo hashes. All
#           targets must share the same MACHINE_ID (rsh and backend
#           command are taken from the first target) and the same
#           target type (e.g. "raw").
# %opts:
#   commit => "after" | "each"  passed as --commit-after / --commit-each
#                               to "btrfs subvolume delete"
#   type   => transaction type name (defaults to "delete")
#
# Returns the list of successfully deleted targets (possibly empty).
# If the delete command fails, stderr is matched against the target
# paths in order to determine which targets were actually deleted.
sub btrfs_subvolume_delete($@)
{
  my $targets = shift // die;
  my %opts = @_;
  my $commit = $opts{commit};
  die if($commit && ($commit ne "after") && ($commit ne "each"));
  # accept a single vinfo as well as an arrayref of vinfo
  $targets = [ $targets ] unless(ref($targets) eq "ARRAY");
  return () unless(scalar(@$targets));

  # NOTE: rsh and backend command is taken from first target
  my $rsh_machine_check = $targets->[0]->{MACHINE_ID};
  my $target_type = $targets->[0]->{node}{TARGET_TYPE} || "";
  foreach (@$targets) {
    # assert all targets share same MACHINE_ID
    die if($rsh_machine_check ne $_->{MACHINE_ID});
    # assert all targets share same target type
    die if($target_type && ($_->{node}{TARGET_TYPE} ne $target_type));
  }

  # NOTE(review): commit option is only logged (and only applies) for
  # non-raw targets; the raw branch below uses plain "rm".
  INFO "[delete] options: commit-$commit" if($commit && (not $target_type));
  INFO "[delete] target: $_->{PRINT}" foreach(@$targets);
  start_transaction($opts{type} // "delete",
                    # NOTE: "target_url" from vinfo_prefixed_keys() is used for matching in end_transaction() below
                    map( { { vinfo_prefixed_keys("target", $_) }; } @$targets)
                   );
  my $ret;
  my @deleted;
  my %err_catch;  # target PATH => [ stderr lines ] of failed deletions
  if($target_type eq "raw") {
    # raw targets: remove the data file(s) with "rm -f"; split backups
    # expand to "<file>.split_??", and the ".info" sidecar file is
    # removed together with the data file.
    my @cmd_target_paths;
    foreach(@$targets) {
      if($_->{node}{BTRBK_RAW}{split}) {
        push @cmd_target_paths, "$_->{PATH}.split_??"; # unsafe is checked with path.info below
      } else {
        push @cmd_target_paths, { unsafe => $_->{PATH} };
      }
      if($_->{node}{BTRBK_RAW}{INFO_FILE}) {
        # DEPRECATED raw format: no info file in deprecated format
        push @cmd_target_paths, { unsafe => "$_->{PATH}.info" };
      }
    }
    $ret = run_cmd(cmd  => ['rm', '-f',  @cmd_target_paths ],
                   rsh  => vinfo_rsh($targets->[0]),
                  );
    unless(defined($ret)) {
      # map "rm" error messages back to target paths (strip the
      # ".info" / ".split_xx" postfix so $catch matches $vol->{PATH})
      foreach(@stderr) {
        next unless(/^rm: cannot remove '($file_match)':/);
        my $catch = $1; # make sure $catch matches $vol->{PATH}
        $catch =~ s/\.info$//;
        $catch =~ s/\.split_[a-z][a-z]$//;
        $err_catch{$catch} //= [];
        push(@{$err_catch{$catch}}, $_);
      }
    }
  }
  else {
    # btrfs subvolumes: delete all targets with a single
    # "btrfs subvolume delete" invocation.
    my @cmd_target_paths = map { { unsafe => $_->{PATH} } } @$targets;
    my @options;
    @options = ("--commit-$commit") if($commit);
    $ret = run_cmd(cmd => vinfo_cmd($targets->[0], "btrfs subvolume delete", @options, @cmd_target_paths ),
                   rsh => vinfo_rsh($targets->[0]),
                   fatal_stderr  => sub { m/^ERROR: /; },  # probably not needed, "btrfs sub delete" returns correct exit status
                   filter_stderr => \&_btrfs_filter_stderr,
                  );
    unless(defined($ret)) {
      foreach(@stderr) {
        next unless(/'($file_match)'/ || /: ($file_match)$/ || /($file_match):/);
        # NOTE: as of btrfs-progs-4.16, this does not catch anything
        $err_catch{$1} //= [];
        push(@{$err_catch{$1}}, $_);
      }
    }
  }

  if(defined($ret)) {
    @deleted = @$targets;
  }
  else {
    # On command failure, consider every target WITHOUT a matched error
    # message as deleted -- but only if every matched error message could
    # be assigned to a target; otherwise assume nothing was deleted.
    if(%err_catch) {
      my $catch_count = 0;
      foreach my $check_target (@$targets) {
        my $err_ary = $err_catch{$check_target->{PATH}};
        if($err_ary) {
          ERROR map("Failed to delete subvolume \"$check_target->{PRINT}\": $_", @$err_ary);
          $catch_count++;
        }
        else {
          push @deleted, $check_target;
        }
      }
      @deleted = () if($catch_count != (scalar keys %err_catch));
    }
    unless(scalar(@deleted)) {
      ERROR "Failed to match error messages from delete command, assuming nothing deleted", @stderr;
      ERROR map("Possibly not deleted subvolume: $_->{PRINT}", @$targets);
      ERROR "Consider running 'btrbk prune -n'";
    }
  }

  # mark only the actually-deleted targets as successful in the transaction log
  end_transaction($opts{type} // "delete", sub { my $h = shift; return (grep { $_->{URL} eq $h->{target_url} } @deleted); });
  return @deleted;
}


# Destroy the level-0 qgroup ("0/<subvolume-id>") associated with $vol,
# wrapped in a "qgroup_destroy" transaction (type overridable via
# $opts{type}). Returns $vol on success, undef on error.
sub btrfs_qgroup_destroy($@)
{
  my $vol = shift // die;
  my %opts = @_;

  # without the subvolume id we cannot derive the qgroup id
  my $subvol_id = $vol->{node}{id};
  unless($subvol_id) {
    ERROR "Unknown subvolume_id for: $vol->{PRINT}";
    return undef;
  }
  my $subvol_path = $vol->{PATH} // die;

  my $qgroup_id = "0/$subvol_id";
  INFO "[qgroup-destroy] qgroup_id: $qgroup_id";
  INFO "[qgroup-destroy] subvolume: $vol->{PRINT}";

  my $transaction_type = $opts{type} // "qgroup_destroy";
  start_transaction($transaction_type, vinfo_prefixed_keys("target", $vol));
  my $cmd_ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs qgroup destroy", $qgroup_id, { unsafe => $subvol_path }),
                        rsh => vinfo_rsh($vol),
                        filter_stderr => \&_btrfs_filter_stderr,
                       );
  end_transaction($transaction_type, defined($cmd_ret));

  return $vol if(defined($cmd_ret));
  ERROR "Failed to destroy qgroup \"$qgroup_id\" for subvolume: $vol->{PRINT}", @stderr;
  return undef;
}


# Create a backup of $snapshot below $target using a
# "btrfs send | btrfs receive" command pipe, optionally incremental
# against $parent (send -p) and with additional clone sources
# @$clone_src (send -c). After the transfer, the created subvolume is
# read back (btrfs subvolume show) and sanity-checked; partially
# received (garbled) subvolumes are deleted again.
#
# $ret_vol_received - scalar ref; set (early, also on failure) to the
#                     vinfo of the target subvolume.
#
# Returns 1 on success, undef on error.
sub btrfs_send_receive($$;$$$)
{
  my $snapshot = shift || die;
  my $target = shift || die;
  my $parent = shift;
  my $clone_src = shift // [];
  my $ret_vol_received = shift;
  my $snapshot_path = $snapshot->{PATH} // die;
  my $target_path = $target->{PATH} // die;
  my $parent_path = $parent ? $parent->{PATH} : undef;

  # target subvolume keeps the snapshot's name
  my $vol_received = vinfo_child($target, $snapshot->{NAME});
  $$ret_vol_received = $vol_received if(ref $ret_vol_received);

  print STDOUT "Creating backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));

  INFO "[send/receive] target: $vol_received->{PRINT}";
  INFO "[send/receive] source: $snapshot->{PRINT}";
  INFO "[send/receive] parent: $parent->{PRINT}" if($parent);
  INFO "[send/receive] clone-src: $_->{PRINT}" foreach(@$clone_src);

  my $stream_options = config_stream_hash($snapshot, $target);

  my @send_options;
  my @receive_options;
  push(@send_options, '-p', { unsafe => $parent_path} ) if($parent_path);
  push(@send_options, '-c', { unsafe => $_ } ) foreach(map { $_->{PATH} } @$clone_src);
  # push(@send_options, '-v') if($loglevel >= 3);
  # push(@receive_options, '-v') if($loglevel >= 3);

  # assemble the "btrfs send | btrfs receive" command pipe
  my @cmd_pipe;
  push @cmd_pipe, {
    cmd => vinfo_cmd($snapshot, "btrfs send", @send_options, { unsafe => $snapshot_path } ),
    rsh => vinfo_rsh($snapshot, disable_compression => $stream_options->{stream_compress}),
    name => "btrfs send",
    stream_options => $stream_options,
    # drop the informational "At subvol ..." line from send stderr
    filter_stderr => [ \&_btrfs_filter_stderr, sub { $_ = undef if(/^At subvol/) } ],
  };

  push @cmd_pipe, {
    cmd => vinfo_cmd($target, "btrfs receive", @receive_options, { unsafe => $target_path . '/' } ),
    rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
    name => "btrfs receive",
    fatal_stderr  => sub { m/^ERROR: /; }, # NOTE: btrfs-progs < 4.11: if "btrfs send" fails, "btrfs receive" returns 0!
  };

  my $send_receive_error = 0;
  start_transaction("send-receive",
                    vinfo_prefixed_keys("target", $vol_received),
                    vinfo_prefixed_keys("source", $snapshot),
                    vinfo_prefixed_keys("parent", $parent),
                   );
  my $ret = run_cmd(@cmd_pipe);
  my @cmd_err;
  unless(defined($ret)) {
    @cmd_err = @stderr; # save for later
    $send_receive_error = 1;
  }

  # Read in target subvolume metadata (btrfs subvolume show):
  # Double checking the output increases robustness against exotic
  # revisions of external commands (btrfs-progs, pv, xz, lz4, ...).
  #
  # NOTE: we cannot rely on the underlying shell to have
  # "pipefail" functionality.
  #
  # NOTE: btrfs-progs < 4.11:
  # "cat /dev/null | btrfs receive" returns with exitcode=0 and no
  # error message, having the effect that silently no subvolume is
  # created if any command in @cmd_pipe fail.
  my $is_garbled;
  if($dryrun) {
    INFO "[send/receive] (dryrun, skip) checking target metadata: $vol_received->{PRINT}";
  }
  else {
    INFO "[send/receive] checking target metadata: $vol_received->{PRINT}";
    my $detail = btrfs_subvolume_show($vol_received);
    if(defined($detail)) {
      unless($send_receive_error) {
        # plausibility checks on target detail
        unless($detail->{readonly}) {
          push @cmd_err, "target is not readonly: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if($detail->{received_uuid} && ($detail->{received_uuid} eq '-')) {
          # NOTE: received_uuid is not in @required_keys (needs btrfs-progs >= 4.1 (BTRFS_PROGS_MIN))
          # so we only check it if it's really present
          push @cmd_err, "received_uuid is not set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if($parent && ($detail->{parent_uuid} eq '-')) {
          push @cmd_err, "parent_uuid is not set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if((not $parent) && ($detail->{parent_uuid} ne '-')) {
          push @cmd_err, "parent_uuid is set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
      }

      # incomplete received (garbled) subvolumes are not readonly and have no received_uuid
      $is_garbled = ((not $detail->{readonly}) && defined($detail->{received_uuid}) && ($detail->{received_uuid} eq '-'));
    }
    else {
      push @cmd_err, "failed to check target subvolume: $vol_received->{PRINT}", @stderr;
      $send_receive_error = 1;
    }
  }

  end_transaction("send-receive", not $send_receive_error);

  if($send_receive_error) {
    ERROR "Failed to send/receive subvolume: $snapshot->{PRINT} " . ($parent_path ? "[$parent_path]" : "") . " -> $vol_received->{PRINT}", @cmd_err;
  }

  if($is_garbled) {
    # NOTE: btrfs-progs does not delete incomplete received (garbled) subvolumes,
    #       we need to do this by hand.
    # TODO: remove this as soon as btrfs-progs handle receive errors correctly.
    my @deleted = btrfs_subvolume_delete($vol_received, commit => "after", type => "delete_garbled");
    if(scalar(@deleted)) {
      WARN "Deleted partially received (garbled) subvolume: $vol_received->{PRINT}";
    }
    else {
      WARN "Deletion of partially received (garbled) subvolume failed, assuming clean environment: $vol_received->{PRINT}";
    }
  }
  return $send_receive_error ? undef : 1;
}


# Send the subvolume $source to a raw backup file below $target, optionally
# incremental against $parent, via a command pipe:
#   btrfs send [ | compress ] [ | gpg / openssl enc ] | split / dd
# A sidecar ".info" file is written before the transfer (flagged INCOMPLETE),
# and rewritten without the flag after the target file has been verified to
# be present and non-empty.
#
# $ret_vol_received - scalar ref; always set to the vinfo of the target file
#                     when the function returns
# $ret_raw_info     - scalar ref; set to the raw_info hashref
#
# Returns 1 on success, undef on error.
sub btrfs_send_to_file($$$;$$)
{
  my $source = shift || die;
  my $target = shift || die;
  my $parent = shift;
  my $ret_vol_received = shift;
  my $ret_raw_info = shift;
  my $source_path   = $source->{PATH} // die;
  my $target_path   = $target->{PATH} // die;
  my $parent_path   = $parent ? $parent->{PATH} : undef;
  my $parent_uuid   = $parent ? $parent->{node}{uuid} : undef ;
  my $received_uuid = $source->{node}{uuid};
  die unless($received_uuid);
  die if($parent && !$parent_uuid);

  # prepare raw_info (for vinfo_inject_child)
  my %raw_info = (
    TYPE => 'raw',
    RECEIVED_UUID => $received_uuid,
    INCOMPLETE => 1,
    # source_url => $source->{URL},
   );

  my $target_filename = $source->{NAME} || die;
  #  $target_filename .= "--$received_uuid";
  #  $target_filename .= '@' . $parent_uuid if($parent_uuid);
  $target_filename .= ".btrfs";

  my $compress = config_compress_hash($target, "raw_target_compress");
  my $encrypt = config_encrypt_hash($target, "raw_target_encrypt");
  my $split = config_key($target, "raw_target_split");
  my $stream_options = config_stream_hash($source, $target);

  # make sure we dont re-compress, override "stream_compress" with "raw_target_compress"
  $stream_options->{stream_compress} = $compress if($compress);

  my @send_options;
  # FIX: wrap parent path in { unsafe => ... }, consistent with
  # btrfs_send_receive() and with $source_path below.
  push(@send_options, '-p', { unsafe => $parent_path } ) if($parent_path);
  #push(@send_options, '-v') if($loglevel >= 3);

  my @cmd_pipe;
  push @cmd_pipe, {
    cmd => vinfo_cmd($source, "btrfs send", @send_options, { unsafe => $source_path } ),
    rsh => vinfo_rsh($source, disable_compression => $stream_options->{stream_compress}),
    name => "btrfs send",
    stream_options => $stream_options,
    filter_stderr => [ \&_btrfs_filter_stderr, sub { $_ = undef if(/^At subvol/) } ],
    fatal_stderr  => sub { m/^ERROR: /; },
  };

  if($compress) {
    $raw_info{compress} = $compression{$compress->{key}}->{format};
    $target_filename .= '.' . $compression{$compress->{key}}->{format};
    push @cmd_pipe, { compress_stdin => $compress }; # does nothing if already compressed by stream_compress
  }
  if($encrypt) {
    $target_filename .= ($encrypt->{type} eq "gpg") ? '.gpg' : '.encrypted';
  }

  # NOTE: $ret_vol_received must always be set when function returns!
  my $vol_received = vinfo_child($target, $target_filename);
  $$ret_vol_received = $vol_received if(ref $ret_vol_received);

  if($encrypt) {
    $raw_info{encrypt} = $encrypt->{type};

    if($encrypt->{type} eq "gpg") {
      # NOTE: We set "--no-random-seed-file" since one of the btrbk
      # design principles is to never create any files unasked. Enabling
      # "--no-random-seed-file" creates ~/.gnupg/random_seed, and as
      # such depends on $HOME to be set correctly (which e.g. is set to
      # "/" by some cron daemons). From gpg2(1) man page:
      #   --no-random-seed-file GnuPG uses a file to store its
      #   internal random pool over invocations This makes random
      #   generation faster; however sometimes write operations are not
      #   desired. This option can be used to achieve that with the cost
      #   of slower random generation.
      my @gpg_options = ( '--batch', '--no-tty', '--no-random-seed-file', '--trust-model', 'always' );
      push @gpg_options, ( '--compress-algo', 'none' ) if($compress);  # NOTE: if --compress-algo is not set, gpg might still compress according to OpenPGP standard.
      push(@gpg_options, ( '--no-default-keyring', '--keyring', $encrypt->{keyring} )) if($encrypt->{keyring});
      push(@gpg_options, ( '--default-recipient', $encrypt->{recipient} )) if($encrypt->{recipient});
      push @cmd_pipe, {
        cmd => [ 'gpg', @gpg_options, '--encrypt' ],
        name => 'gpg',
        compressed_ok => ($compress ? 1 : 0),
      };
    }
    elsif($encrypt->{type} eq "openssl_enc") {
      # encrypt using "openssl enc"
      $raw_info{cipher} = $encrypt->{ciphername};

      # NOTE: iv is always generated locally!
      my $iv_size = $encrypt->{iv_size};
      my $iv;
      if($iv_size) {
        INFO "Generating iv for openssl encryption (cipher=$encrypt->{ciphername})";
        $iv = system_urandom($iv_size, 'hex');
        unless($iv) {
          ERROR "Failed to generate IV for openssl_enc: $source->{PRINT}";
          return undef;
        }
        $raw_info{iv} = $iv;
      }

      my $encrypt_key;
      if($encrypt->{keyfile}) {
        if($encrypt->{kdf_backend}) {
          WARN "Both openssl_keyfile and kdf_backend are configured, ignoring kdf_backend!";
        }
        # key is read by the remote/local shell at execution time
        $encrypt_key = '$(cat ' . $encrypt->{keyfile} . ')';
      }
      elsif($encrypt->{kdf_backend}) {
        # session key and kdf vars are cached in file-level globals
        # ($kdf_session_key, %kdf_vars); with kdf_keygen_each, a fresh
        # key is generated for every backup.
        if($encrypt->{kdf_keygen_each}) {
          $kdf_session_key = undef;
          %kdf_vars = ();
        }
        if($kdf_session_key) {
          INFO "Reusing session key for: $vol_received->{PRINT}";
        }
        else {
          # run kdf backend, set session key and vars
          DEBUG "Generating session key for: $vol_received->{PRINT}";
          my $kdf_backend_name = $encrypt->{kdf_backend};
          $kdf_backend_name =~ s/^.*\///;
          my $key_target_text = $encrypt->{kdf_keygen_each} ? "\"$vol_received->{PRINT}\"" : "all raw backups";

          print STDOUT "\nGenerate session key for $key_target_text:\n";
          my $kdf_values = run_cmd(cmd => [ $encrypt->{kdf_backend}, $encrypt->{kdf_keysize} ],
                                   non_destructive => 1,
                                   name => $kdf_backend_name
                                  );
          unless(defined($kdf_values)) {
            ERROR "Failed to generate session key for $key_target_text", @stderr;
            return undef;
          }

          foreach(@$kdf_values) {
            chomp;
            next if /^\s*$/; # ignore empty lines
            # FIX: hex character class was "[0-9a-fA-f]" (range A-f also
            # matches "[\]^_`" and uppercase letters beyond F)
            if(/^KEY=([0-9a-fA-F]+)/) {
              $kdf_session_key = $1;
            }
            elsif(/^([a-z_]+)=(.*)/) {
              my $info_key = 'kdf_' . $1;
              my $info_val = $2;
              DEBUG "Adding raw_info from kdf_backend: $info_key=$info_val";
              $kdf_vars{$info_key} = $info_val;
            }
            else {
              ERROR "Ambiguous line from kdf_backend: $encrypt->{kdf_backend}";
              return undef;
            }
          }
          # key must be exactly kdf_keysize bytes (2 hex chars per byte)
          unless($kdf_session_key && (length($kdf_session_key) == ($encrypt->{kdf_keysize} * 2))) {
            ERROR "Ambiguous key value from kdf_backend: $encrypt->{kdf_backend}";
            return undef;
          }
          INFO "Generated session key for: $vol_received->{PRINT}";
        }
        $encrypt_key = $kdf_session_key;
        %raw_info = ( %kdf_vars, %raw_info );
      }

      my @openssl_options = (
        '-' . $encrypt->{ciphername},
        '-K', $encrypt_key,
       );
      push @openssl_options, ('-iv', $iv) if($iv);

      push @cmd_pipe, {
        cmd => [ 'openssl', 'enc', '-e', @openssl_options ],
        name => 'openssl_enc',
        compressed_ok => ($compress ? 1 : 0),
      };
    }
    else {
      die "Unsupported encryption type (raw_target_encrypt)";
    }
  }

  if($split) {
    # NOTE: we do not append a ".split" suffix on $target_filename here, as this propagates to ".info" file
    $raw_info{split} = $split;
    push @cmd_pipe, {
      cmd => [ 'split', '-b', uc($split), '-', "${target_path}/${target_filename}.split_" ],
      check_unsafe => [ { unsafe => "${target_path}/${target_filename}.split_" } ],
      rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
      compressed_ok => ($compress ? 1 : 0),
    }
  }
  else {
    push @cmd_pipe, {
      # NOTE: We use "dd" instead of shell redirections here, as it is
      # common to have special filesystems (like NFS, SMB, FUSE) mounted
      # on $target_path. By using "dd" we make sure to write in
      # reasonably large blocks (default=128K), which is not always the
      # case when using redirections (e.g. "gpg > outfile" writes in 8K
      # blocks).
      # Another approach would be to always pipe through "cat", which
      # uses st_blksize from fstat(2) (with a minimum of 128K) to
      # determine the block size.
      cmd => [ 'dd', 'status=none', 'bs=' . config_key($target, "raw_target_block_size"), "of=${target_path}/${target_filename}" ],
      check_unsafe => [ { unsafe => "${target_path}/${target_filename}" } ],
      #redirect_to_file => { unsafe => "${target_path}/${target_filename}" },  # alternative (use shell redirection), less overhead on local filesystems (barely measurable):
      rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
      compressed_ok => ($compress ? 1 : 0),
    };
  }

  $raw_info{FILE} = $target_filename;
  $raw_info{RECEIVED_PARENT_UUID} = $parent_uuid if($parent_uuid);
  # disabled for now, as its not very useful and might leak information:
  # $raw_info{parent_url} = $parent->{URL} if($parent);
  # $raw_info{target_url} = $vol_received->{URL};
  $$ret_raw_info = \%raw_info if($ret_raw_info);

  print STDOUT "Creating raw backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));

  INFO "[send-to-raw] target: $vol_received->{PRINT}";
  INFO "[send-to-raw] source: $source->{PRINT}";
  INFO "[send-to-raw] parent: $parent->{PRINT}" if($parent);

  start_transaction("send-to-raw",
                    vinfo_prefixed_keys("target", $vol_received),
                    vinfo_prefixed_keys("source", $source),
                    vinfo_prefixed_keys("parent", $parent),
                   );
  # write the ".info" sidecar (flagged INCOMPLETE) before the transfer
  my $ret;
  $ret = system_write_raw_info($vol_received, \%raw_info);

  my @cmd_err;
  if(defined($ret)) {
    $ret = run_cmd(@cmd_pipe);
    @cmd_err = @stderr unless(defined($ret)); # save for later
  }
  else {
    push @cmd_err, "failed to write raw .info file: $vol_received->{PATH}.info", @stderr;
  }

  if(defined($ret)) {
    # Test target file for "exists and size > 0" after writing, as we
    # can not rely on the exit status of the command pipe, and a shell
    # redirection as well as "dd" always creates the target file.
    # Note that "split" does not create empty files.
    my $test_postfix = ($split ? ".split_aa" : "");
    my $check_file = "${target_path}/${target_filename}${test_postfix}";
    DEBUG "Testing target data file (non-zero size): $check_file";
    $ret = run_cmd({
      cmd  => ['test', '-s', { unsafe => $check_file } ],
      rsh  => vinfo_rsh($target),
      name => "test",
    });
    if(defined($ret)) {
      # Write raw info file again, this time without incomplete flag
      delete $raw_info{INCOMPLETE};
      $ret = system_write_raw_info($vol_received, \%raw_info);
    }
    else {
      push @cmd_err, "failed to check target file (not present or zero length): $check_file";
    }
  }
  end_transaction("send-to-raw", defined($ret));
  unless(defined($ret)) {
    ERROR "Failed to send btrfs subvolume to raw file: $source->{PRINT} " . ($parent_path ? "[$parent_path]" : "") . " -> $vol_received->{PRINT}", @cmd_err;
    return undef;
  }
  return 1;
}


# Read and parse /proc/self/mountinfo on the machine of $vol.
#
# Returns an arrayref of hashes, one per mount point, with the named
# captures of the regex below (mount_id, parent_id, st_dev, fs_root,
# mount_point, mount_options, fs_type, mount_source, super_options)
# plus MNTOPS: a hashref of merged mount_options/super_options, split
# on ',' (key => value for "k=v" entries, key => 1 for flags).
# Returns undef on error.
sub system_list_mountinfo($)
{
  my $vol = shift // die;
  my $file = '/proc/self/mountinfo'; # NOTE: /proc/self/mounts is deprecated
  my $ret = run_cmd(cmd => [ qw(cat), $file ],
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));

  my @mountinfo;
  foreach(@$ret)
  {
    # https://www.kernel.org/doc/Documentation/filesystems/proc.txt
    unless(/^(?<mount_id>[0-9]+)       # mount ID:         unique identifier of the mount (may be reused after umount)
            \s(?<parent_id>[0-9]+)     # parent ID:        ID of parent (or of self for the top of the mount tree)
            \s(?<st_dev>[0-9]+:[0-9]+) # major:minor:      value of st_dev for files on filesystem
            \s(?<fs_root>\S+)          # root:             root of the mount within the filesystem
            \s(?<mount_point>\S+)      # mount point:      mount point relative to the process's root
            \s(?<mount_options>\S+)    # mount options:    per mount options
            (\s\S+)*                   # optional fields:  zero or more fields of the form "tag[:value]"
            \s-                        # separator:        marks the end of the optional fields
            \s(?<fs_type>\S+)          # filesystem type:  name of filesystem of the form "type[.subtype]"
            \s(?<mount_source>\S+)     # mount source:     filesystem specific information or "none"
            \s(?<super_options>\S+)$   # super options:    per super block options
           /x)
    {
      # abort on first non-parseable line
      ERROR "Failed to parse \"$vol->{URL_PREFIX}$file\"";
      DEBUG "Offending line: $_";
      return undef;
    }
    my %line = %+;

    # merge super_options and mount_options to MNTOPS.
    my %mntops;
    foreach (split(',', delete($line{super_options})),
             split(',', delete($line{mount_options})))
    {
      if(/^(.+?)=(.+)$/) {
        $mntops{$1} = $2;
      } else {
        $mntops{$_} = 1;
      }
    }
    $mntops{rw} = 0 if($mntops{ro}); # e.g. mount_options="ro", super_options="rw"

    # decode values (octal, e.g. "\040" = whitespace)
    s/\\([0-7]{3})/chr(oct($1))/eg foreach(values %line, values %mntops);

    $line{MNTOPS} = \%mntops;
    push @mountinfo, \%line;
  }
  # TRACE(Data::Dumper->Dump([\@mountinfo], ["mountinfo"])) if($do_trace && $do_dumper);
  return \@mountinfo;
}


# Check that the directory behind $vol exists, by running "test -d"
# (possibly remote via rsh). Returns 1 on success, undef on failure.
sub system_testdir($)
{
  my $vol = shift // die;
  my $dir_path = $vol->{PATH} // die;
  my $cmd_ret = run_cmd(cmd => vinfo_cmd($vol, "test", '-d', { unsafe => $dir_path } ),
                        rsh => vinfo_rsh($vol),
                        non_destructive => 1,
                       );
  if(defined($cmd_ret)) {
    DEBUG "Directory exists: $vol->{PRINT}";
    return 1;
  }
  return undef;
}


# Resolve the canonical (real) path of $vol using "readlink" (possibly
# remote via rsh).
#
# In busybox compat mode (config "compat_remote" for remote hosts,
# "compat_local"/"compat" otherwise, set to "busybox"), "readlink -e"
# is not available: fall back to "readlink -f" and verify directory
# existence separately via system_testdir().
#
# Returns the absolute real path, or undef on error.
sub system_realpath($)
{
  my $vol = shift // die;
  my $path = $vol->{PATH} // die;
  # FIX: "eq" binds tighter than "//" in perl, the original expression
  # `... // "" eq "busybox"` parsed as `... // ("" eq "busybox")`,
  # losing the comparison against "busybox" entirely.
  my $compat = ((($vol->{HOST} && config_key($vol, "compat_remote")) //
                 config_key($vol, "compat_local") //
                 config_key($vol, "compat") // "") eq "busybox");
  my @options = ("-v");                # report error messages
  push @options, "-e" unless($compat); # all components must exist (not available in busybox!)
  push @options, "-f" if($compat);     # all but the last component must exist.
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "readlink", @options, { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));

  my $realpath = scalar(@$ret) ? (check_file($ret->[0], { absolute => 1 }) // "") : "";
  unless($realpath) {
    ERROR "Failed to parse output of `realpath` for \"$vol->{PRINT}\": \"$ret->[0]\"";
    return undef;
  }
  DEBUG "Real path for \"$vol->{PRINT}\" is: $realpath";
  # "readlink -f" does not guarantee existence of the last component
  return undef if($compat && !system_testdir($vol));
  return $realpath;
}


# Create the directory for $vol ("mkdir -p", possibly remote via rsh),
# wrapped in a "mkdir" transaction. Returns 1 on success, undef on failure.
sub system_mkdir($)
{
  my $vol = shift // die;
  my $dir_path = $vol->{PATH} // die;
  INFO "Creating directory: $vol->{PRINT}/";
  start_transaction("mkdir", vinfo_prefixed_keys("target", $vol));
  my $cmd_ret = run_cmd(cmd => [ 'mkdir', '-p', { unsafe => $dir_path } ],
                        rsh => vinfo_rsh($vol),
                       );
  end_transaction("mkdir", defined($cmd_ret));
  return defined($cmd_ret) ? 1 : undef;
}


# Resolve the btrfs mount point containing $vol, using cached realpath
# (%realpath_cache) and mountinfo (%mountinfo_cache) lookups.
#
# $autofs_retry is set internally on the single recursive retry after
# triggering an autofs automount.
#
# Returns a list:
#   (mount_point, realpath, subvolid, mount_source, \@same_source_mounts)
# where @same_source_mounts lists all btrfs mountinfo entries sharing the
# same mount source device. Returns undef on error or if $vol is not on
# a btrfs filesystem.
sub btrfs_mountpoint
{
  my $vol = shift // die;
  my $autofs_retry = shift;

  DEBUG "Resolving btrfs mount point for: $vol->{PRINT}";

  # get real path
  my $realpath = $realpath_cache{$vol->{URL}};
  unless(defined($realpath)) {
    $realpath = system_realpath($vol);
    # set to empty string on errors (try only once)
    $realpath_cache{$vol->{URL}} = $realpath // "";
  }
  return undef unless($realpath);

  # get all mountpoints
  my $mountinfo = $mountinfo_cache{$vol->{MACHINE_ID}};
  TRACE "mountinfo_cache " . ($mountinfo ? "HIT" : "MISS") . ": $vol->{MACHINE_ID}" if($do_trace);
  unless($mountinfo) {
    $mountinfo = system_list_mountinfo($vol);
    return undef unless($mountinfo);
    $mountinfo_cache{$vol->{MACHINE_ID}} = $mountinfo;
  }

  # find mount point (last mountinfo entry matching realpath)
  $realpath .= '/' unless($realpath =~ /\/$/);  # correctly handle root path="/"
  my $mountpoint;
  foreach(reverse @$mountinfo) {
    my $mnt_path = $_->{mount_point};
    $mnt_path .= '/' unless($mnt_path =~ /\/$/);  # correctly handle root path="/"
    if($realpath =~ /^\Q$mnt_path\E/) {
      $mountpoint = $_;
      last;
    }
  }
  unless($mountpoint) {
    # should never happen, as "/" should always be present in mounts
    ERROR "No mount point found for: $vol->{PRINT} (realpath=\"$realpath\")";
    return undef;
  }
  TRACE "resolved mount point (mount_source=$mountpoint->{mount_source}, subvolid=" . ($mountpoint->{MNTOPS}->{subvolid} // '<undef>') . "): $mountpoint->{mount_point}" if($do_trace);

  # handle autofs: trigger the automount (by accessing the mount point via
  # btrfs_subvolume_show), invalidate the mountinfo cache, and retry once
  if($mountpoint->{fs_type} eq 'autofs') {
    if($autofs_retry) {
      DEBUG "Non-btrfs autofs mount point for: $vol->{PRINT}";
      return undef;
    }
    DEBUG "Found autofs mount point, triggering automount on $mountpoint->{mount_point} for: $vol->{PRINT}";
    btrfs_subvolume_show(vinfo($vol->{URL_PREFIX} . $mountpoint->{mount_point}, $vol->{CONFIG}));
    $mountinfo_cache{$vol->{MACHINE_ID}} = undef;
    return btrfs_mountpoint($vol, 1);
  }
  elsif($mountpoint->{fs_type} ne 'btrfs') {
    DEBUG "No btrfs mount point found for: $vol->{PRINT}";
    return undef;
  }

  # list all mountpoints of same device
  my @same_source_mounts;
  my $mount_source_match = $mountpoint->{mount_source};
  foreach my $mnt (@$mountinfo) {
    if($mnt->{mount_source} eq $mount_source_match) {
      unless($mnt->{fs_type} eq 'btrfs') {
        # should never happen, same device should always have fs_type=btrfs
        DEBUG "Ignoring non-btrfs mount point: $mnt->{mount_source} $mnt->{mount_point} $mnt->{fs_type}";
        next;
      }
      unless($mnt->{mount_point} =~ /^$file_match$/) {
        INFO_ONCE "Ignoring non-parseable btrfs mountpoint: $vol->{MACHINE_ID}$mnt->{mount_point}";
        next;
      }
      unless($mnt->{MNTOPS}->{subvolid}) {
        # kernel <= 4.2 does not have subvolid=NN in /proc/self/mounts, read it with btrfs-progs
        DEBUG "No subvolid provided in mounts for: $mnt->{mount_point}";
        my $detail = btrfs_subvolume_show(vinfo($vol->{URL_PREFIX} . $mnt->{mount_point}, $vol->{CONFIG}));
        return undef unless($detail);
        $mnt->{MNTOPS}->{subvolid} = $detail->{id} || die;  # also affects %mountinfo_cache
      }
      TRACE "using btrfs mount point (mount_source=$mnt->{mount_source}, subvolid=$mnt->{MNTOPS}->{subvolid}): $mnt->{mount_point}" if($do_trace);
      push(@same_source_mounts, $mnt);
    }
  }

  DEBUG "Btrfs mount point for \"$vol->{PRINT}\": $mountpoint->{mount_point} (mount_source=$mountpoint->{mount_source}, subvolid=$mountpoint->{MNTOPS}->{subvolid})";
  return ($mountpoint->{mount_point}, $realpath, $mountpoint->{MNTOPS}->{subvolid}, $mountpoint->{mount_source}, \@same_source_mounts);
}


# Read and validate all raw backup ".info" sidecar files in directory
# $droot (possibly remote via rsh), as written by system_write_raw_info().
# Additionally scans for raw backup files in the DEPRECATED pre-v0.26.0
# naming format (no sidecar file) and warns about them.
#
# Returns an arrayref of raw-info hashes (keys: INFO_FILE, FILE, TYPE,
# RECEIVED_UUID, RECEIVED_PARENT_UUID, ... as found in the info files;
# deprecated-format entries have no INFO_FILE key), or undef on error.
sub system_read_raw_info_dir($)
{
  my $droot = shift // die;
  my $ret = run_cmd(
    # NOTE: we cannot simply "cat" all files here, as it will fail if no files found
    cmd => [ 'find', { unsafe => $droot->{PATH} },
             '-maxdepth', '1',
             '-type', 'f',
             '-name', '\*.btrfs.\*info',  # match ".btrfs[.gz|bz2|xz][.gpg].info"
             '-exec', 'echo INFO_FILE=\{\} \;',
             '-exec', 'cat \{\} \;'
            ],
    rsh => vinfo_rsh($droot),
    non_destructive => 1,
   );
  unless(defined($ret)) {
    ERROR("Failed to read *.btrfs.*.info files in: $droot->{PATH}");
    return undef;
  }

  # Parse the concatenated output: each "INFO_FILE=<path>" line (emitted by
  # the "echo" -exec above) starts a new target; subsequent "key=value"
  # lines belong to that target. Note that the INFO_FILE line itself also
  # matches the key=value pattern and is stored into the target hash.
  my @raw_targets;
  my $cur_target;
  foreach(@$ret)
  {
    if(/^INFO_FILE=/) {
      push @raw_targets, $cur_target if($cur_target);
      $cur_target = {};
    }
    next if /^#/; # ignore comments
    next if /^\s*$/; # ignore empty lines
    if(/^([a-zA-Z_]+)=(.*)/) {
      my ($key, $value) = ($1, $2);
      if($cur_target) {
        $cur_target->{$key} = $value;
      }
    }
  }
  push @raw_targets, $cur_target if($cur_target);

  # input validation (we need to abort here, or the backups will be resumed)
  foreach my $raw_info (@raw_targets) {
    unless($raw_info->{INFO_FILE}) {
      ERROR("Error while parsing command output for: $droot->{PATH}");
      return undef;
    }
    unless($raw_info->{FILE}) {
      ERROR("Missing \"FILE=\" in raw info file: " . $raw_info->{INFO_FILE});
      return undef;
    }
    unless(check_file($raw_info->{FILE}, { name_only => 1 })) {
      ERROR("Ambiguous \"FILE=\" in raw info file: " . $raw_info->{INFO_FILE});
      return undef;
    }
    unless($raw_info->{TYPE} && ($raw_info->{TYPE} eq 'raw')) {
      ERROR("Unsupported \"type\" in raw info file: " . $raw_info->{INFO_FILE});
      return undef;
    }
    unless($raw_info->{RECEIVED_UUID} && ($raw_info->{RECEIVED_UUID} =~ /^$uuid_match$/)) {
      ERROR("Missing/Illegal \"received_uuid\" in raw info file: " . $raw_info->{INFO_FILE});
      return undef;
    }
    if(defined $raw_info->{RECEIVED_PARENT_UUID}) {
      unless(($raw_info->{RECEIVED_PARENT_UUID} eq '-') || ($raw_info->{RECEIVED_PARENT_UUID} =~ /^$uuid_match$/)) {
        ERROR("Illegal \"RECEIVED_PARENT_UUID\" in raw info file: " . $raw_info->{INFO_FILE});
        return undef;
      }
    }
    else {
      # non-incremental backup: normalize to "-" (same as btrfs-progs)
      $raw_info->{RECEIVED_PARENT_UUID} = '-';
    }
  }

  DEBUG("Parsed " . @raw_targets . " raw info files in path: $droot->{PATH}");
  TRACE(Data::Dumper->Dump([\@raw_targets], ["system_read_raw_info_dir($droot->{URL})"])) if($do_trace && $do_dumper);

  #
  # read DEPRECATED raw format (btrbk < v0.26.0)
  #
  $ret = run_cmd(
    cmd => [ 'find', { unsafe => $droot->{PATH} . '/' }, '-maxdepth', '1', '-type', 'f' ],
    rsh => vinfo_rsh($droot),
    non_destructive => 1,
   );
  unless(defined($ret)) {
    ERROR("Failed to list files from: $droot->{PATH}");
    return undef;
  }
  my $deprecated_found = 0;
  foreach(@$ret)
  {
    unless(/^($file_match)$/) {
      DEBUG "Skipping non-parseable file: \"$_\"";
      next;
    }
    my $file = $1; # untaint argument
    unless($file =~ s/^\Q$droot->{PATH}\E\///) {
      ERROR("Unexpected result from 'find': file \"$file\" is not under \"$droot->{PATH}\"");
      return undef;
    }
    if($file =~ /^(?<name>$file_match)\.$btrbk_timestamp_match$raw_postfix_match_DEPRECATED$/) {
      push @raw_targets, {
        # NOTE: if INFO_FILE is not present, this raw target is treated as deprecated format
        TYPE                 => 'raw',
        FILE                 => $file,
        RECEIVED_UUID        => $+{received_uuid} // die,
        RECEIVED_PARENT_UUID => $+{parent_uuid} // '-',
        INCOMPLETE           => $+{incomplete} ? 1 : 0,
        encrypt              => $+{encrypt} // "",
        compress             => $+{compress} // "",
      };
      $deprecated_found++;
    }
  }
  DEBUG("Parsed $deprecated_found deprecated raw backup files in path: $droot->{PATH}");
  if($deprecated_found) {
    WARN("Found $deprecated_found raw backup files with deprecated file format in: $droot->{PRINT}");
    WARN("Please convert the raw backup files using the `raw_suffix2sidecar` utility.");
  }

  return \@raw_targets;
}


sub system_write_raw_info($$)
{
  # Write the sidecar ".info" file for a raw target backup file.
  # The file is written via `echo -e` (possibly through ssh, see
  # vinfo_rsh) redirected to "<PATH>.info".
  # Returns the info file path on success, undef on failure.
  my $vol = shift // die;
  my $raw_info = shift // die;

  my $info_file = $vol->{PATH} . '.info';
  my @line;
  push @line, "#btrbk-v$VERSION";
  push @line, "# Do not edit this file";

  # sort by %raw_info_sort, then by key
  foreach(sort { (($raw_info_sort{$a} || 99) <=> ($raw_info_sort{$b} || 99)) || ($a cmp $b) } keys %$raw_info) {
    push @line, ($_ . '=' . $raw_info->{$_});
  }

  DEBUG "Creating raw info file " . ($raw_info->{INCOMPLETE} ? "(incomplete)" : "(complete)") . ": $info_file";
  # NOTE: '\n' is single-quoted on purpose: literal backslash-n gets
  # expanded by `echo -e` on the (possibly remote) shell.
  # BUGFIX: $echo_text was previously computed but never used (the same
  # join was duplicated inline in the cmd below).
  my $echo_text = (join '\n', @line);
  TRACE "DUMP INFO_FILE=$info_file\n" . join("\n", @line) if($do_trace);

  my $ret = run_cmd(
    { cmd => [ 'echo', '-e', '-n', '"' . $echo_text . '\n"' ] },
    { redirect_to_file => { unsafe => $info_file },
      rsh => vinfo_rsh($vol),
    });
  return undef unless(defined($ret));

  return $info_file;
}


sub system_urandom($;$) {
  # Return $size bytes of randomness from /dev/urandom, encoded as a
  # hex string (default) or raw binary ($format='bin').
  # Returns undef on I/O errors; dies on invalid $size (must be 1..256)
  # or unknown $format.
  my $size = shift;
  my $format = shift || 'hex';
  die unless(($size > 0) && ($size <= 256)); # sanity check

  # BUGFIX: use a lexical filehandle. The old code opened the bareword
  # handle URANDOM but then called close(FILE), leaking one descriptor
  # per call.
  my $fh;
  unless(open($fh, '<', '/dev/urandom')) {
    ERROR "Failed to open /dev/urandom: $!";
    return undef;
  }
  binmode $fh;
  my $rand;
  my $rlen = read($fh, $rand, $size);
  close($fh);
  unless(defined($rand) && ($rlen == $size)) {
    ERROR "Failed to read from /dev/urandom: $!";
    return undef;
  }

  if($format eq 'hex') {
    my $hex = unpack('H*', $rand);
    die unless(length($hex) == ($size * 2)); # paranoia check
    return $hex;
  }
  elsif($format eq 'bin') {
    return $rand;
  }
  die "unsupported format";
}

sub read_extentmap_cache($)
{
  # Read a cached extent map for $vol from one of the configured cache
  # directories. Returns an arrayref of inclusive [start,end] ranges,
  # or undef if no usable cache exists. A cache file is only valid if
  # its stored generation matches the subvolume's current generation.
  my $vol = shift;
  my $cache_dir = config_key($vol, 'cache_dir');
  return undef unless($cache_dir);
  my $uuid = $vol->{node}{uuid} // die;
  foreach (@$cache_dir) {
    my $file = "$_/${uuid}.extentmap.bin";
    next unless (-f $file);

    DEBUG "Reading extentmap cache: $file";
    if(open(my $fh, '<:raw', $file)) {
      my @range;
      my $buf;
      # header: magic (24 bytes) + generation + timestamp (64bit LE each)
      my $header_len = 24 + 8 * 2;
      my $rlen = read($fh, $buf, $header_len);
      # BUGFIX: check the read length before unpacking; a truncated
      # file previously produced undefined $gen/$time (and triggered
      # "uninitialized" warnings, fatal under "warnings FATAL => all").
      unless(defined($rlen) && ($rlen == $header_len)) {
        ERROR "Ambiguous cache file: $file";
        next;
      }
      my ($v, $gen, $time) = unpack('a24Q<Q<', $buf);
      unless(($v =~ /^btrbk_extentmap_v1/) && $gen && $time) {
        ERROR "Ambiguous cache file: $file";
        next;
      }
      if($gen != $vol->{node}{gen}) {
        WARN "Subvolume generation has changed (cache=$gen, subvol=$vol->{node}{gen}), ignoring cache: $file";
        next;
      }
      while(read $fh, $buf, 8 * 2) { # read / unpack two words: [start,end]
        push @range, [ unpack('Q<Q<', $buf) ];
      };
      DEBUG "Read " . scalar(@range) . " regions (gen=$gen, timestamp='" . localtime($time) . "') from: $file";
      return \@range;
    } else {
      ERROR "Failed to open '$file': $!";
    }
  }
  return undef;
}

sub write_extentmap_cache($)
{
  # Persist $vol->{EXTENTMAP} (arrayref of inclusive [start,end] pairs)
  # to every configured cache directory. No-op unless both an extent
  # map and a "cache_dir" configuration are present.
  my $vol = shift;
  my $extmap = $vol->{EXTENTMAP};
  my $cache_dirs = config_key($vol, 'cache_dir');
  return undef unless($extmap && $cache_dirs);
  my $uuid = $vol->{node}{uuid} // die;

  foreach my $dir (@$cache_dirs) {
    unless(-d $dir) {
      WARN_ONCE "Ignoring cache_dir (not a directory): $dir";
      next;
    }
    my $file = "$dir/${uuid}.extentmap.bin";

    INFO "Writing extentmap cache: $file";
    my $fh;
    unless(open($fh, '>:raw', $file)) {
      ERROR "Failed to create '$file': $!";
      next;
    }
    # header: magic (24 bytes), generation, timestamp
    # pack Q: unsigned quad (64bit, Documentation/filesystems/fiemap.txt)
    print $fh pack('a24Q<Q<', "btrbk_extentmap_v1", $vol->{node}{gen}, time);
    # body: flattened [start,end] pairs, 64bit LE words
    print $fh pack('Q<*', map { @$_ } @$extmap);
    close($fh);
  }
}

# returns extents range (sorted array of [start,end], inclusive) from FIEMAP ioctl
sub filefrag_extentmap($)
{
  # Fallback extent-map implementation using filefrag(8), used when the
  # IO::AIO perl module is not available (see aio_extentmap). Runs
  # filefrag on all regular files below $vol->{PATH} and parses the
  # physical byte ranges from its output. Returns a merged, sorted
  # arrayref of inclusive [start,end] ranges, or undef on error.
  my $vol = shift || die;
  my $starttime = time;

  INFO("Fetching extent map (filefrag): $vol->{PRINT}");

  # NOTE: this returns exitstatus=0 if file is not found, or no files found
  # NOTE(review): the '-exec' argument is a single string with escaped
  # braces; assumes run_cmd() performs the word splitting — confirm.
  my $ret = run_cmd({ cmd => [ 'find', { unsafe => $vol->{PATH} }, '-xdev', '-type', 'f',
                               '-exec', 'filefrag -b1 -v \{\} +' ],
                      large_output => 1});
  unless(defined($ret)) {
    ERROR "Failed to fetch extent map: $vol->{PRINT}", @stderr;
    return undef;
  }
  WARN_ONCE "Configuration option \"ignore_extent_data_inline=no\" not available for filefrag (please install \"IO::AIO\" perl module)" unless(config_key($vol, "ignore_extent_data_inline"));

  my @range; # array of [start,end]
  foreach (@$ret) {
    #my $file = $1 if(/^File size of (.*?) is/);
    # match extent table rows; $1/$2 capture the physical offset range
    if(/^\s*[0-9]+:\s*[0-9]+\.\.\s*[0-9]+:\s*([0-9]+)\.\.\s*([0-9]+):/) {
      # NOTE: filefrag (v1.45.5) returns wrong (?) physical_offset for
      #       "inline" regions unless run with `-b1` (blocksize=1) option.
      #
      # For btrfs file systems it does not make much sense to consider
      # the "inline" extents anyways: these are stored in metadata
      # section and are not really part of the used disk space.
      #
      # # filefrag -v MYFILE
      # File size of MYFILE is 2307 (1 block of 4096 bytes)
      #  ext:     logical_offset:        physical_offset: length:   expected: flags:
      #    0:        0..    4095:          0..      4095:   4096:             last,not_aligned,inline,eof
      # # filefrag -v -b1 MYFILE
      # File size of MYFILE is 2307 (4096 block of 1 bytes)
      #  ext:     logical_offset:        physical_offset: length:   expected: flags:
      #    0:        0..    4095:          0..      4095:   4096:             last,not_aligned,inline,eof
      next if(/inline/);
      push @range, [ $1, $2 ];
    }
  }
  DEBUG("Parsed " . scalar(@range) . " regions in " . (time - $starttime) . "s for: $vol->{PRINT}");
  return extentmap_merge(\@range);
}


# returns extents range (sorted array of [start,end], inclusive) from FIEMAP ioctl
sub aio_extentmap($)
{
  # Fetch the extent map of all regular files below $vol->{PATH} via
  # the FIEMAP ioctl (asynchronously, using IO::AIO). Returns a merged,
  # sorted arrayref of inclusive [start,end] byte ranges, or undef on
  # errors. Inline extents are skipped if "ignore_extent_data_inline".
  my $vol = shift || die;
  my $starttime = time;
  my $ignore_inline = config_key($vol, "ignore_extent_data_inline");

  INFO("Fetching extent map: $vol->{PRINT}");

  # NOTE: this returns exitstatus=0 if file is not found, or no files found
  my $ret = run_cmd( cmd => [ 'find', { unsafe => $vol->{PATH} }, '-xdev', '-type', 'f' ],
                     large_output => 1 );
  unless(defined($ret)) {
    ERROR "Failed to find files in: $vol->{PRINT}", @stderr;
    return undef;
  }

  DEBUG("Reading ioctl FIEMAP of " . scalar(@$ret) . " files");

  IO::AIO::max_outstanding(128);  # < 1024 (max file descriptors)
  IO::AIO::max_poll_reqs(32);

  my @range;
  my $count = 0;         # files successfully opened and fiemap'ed
  my $inline_count = 0;  # extents flagged FIEMAP_EXTENT_DATA_INLINE
  foreach my $file (@$ret) {
    IO::AIO::aio_open($file, IO::AIO::O_RDONLY(), 0, sub {
      # graceful abort on file open errors (check $count below)
      return unless($_[0]); # [ $fh ]

      # note: aio_fiemap returns byte range (not blocks)
      # see: Documentation/filesystems/fiemap.rst
      IO::AIO::aio_fiemap($_[0], 0, undef, 0, undef, sub {
        $count++;
        foreach(@{$_[0]}) { # [ $logical, $physical, $length, $flags ]
          if($_->[3] & IO::AIO::FIEMAP_EXTENT_DATA_INLINE()) {
            $inline_count++;
            next if($ignore_inline);
            WARN_ONCE "Ambiguous inline region [$_->[1] .. $_->[1] + $_->[2] - 1] for $file" if((($_->[1] != 0) || ($_->[2] != 4096)));
          }
          push @range, [ $_->[1], $_->[1] + $_->[2] - 1 ];
        }
      });
    });
    # poll, or the above eats up all our filedescriptors
    IO::AIO::poll_cb(); # takes "max_outstanding" and "max_poll_reqs" settings
  }

  IO::AIO::flush();

  # BUGFIX: report the number of files that could NOT be opened;
  # previously this printed $count, which is the number of successfully
  # processed files.
  my $n_files = scalar(@$ret);
  WARN "Failed to open " . ($n_files - $count) . " / $n_files files" if($count != $n_files);

  DEBUG("Parsed " . scalar(@range) . " regions (" . ($ignore_inline ? "ignored " : "") . "$inline_count \"inline\") for $count files in " . (time - $starttime) . "s for: $vol->{PRINT}");
  return extentmap_merge(\@range);
}


sub extentmap_total_blocks($)
{
  # Sum of region lengths (inclusive [start,end] pairs) in
  # $extmap->{rmap}.
  my $extmap = shift;
  my $total = 0;
  $total += ($_->[1] - $_->[0] + 1) foreach (@{$extmap->{rmap}});
  return $total;
}


sub extentmap_size($)
{
  # Total byte count covered by a merged range list (arrayref of
  # inclusive [start,end] pairs). Returns undef if no map is given.
  my $ranges = shift; # merged ranges
  return undef unless($ranges);
  my $total = 0;
  $total += ($_->[1] - $_->[0] + 1) for @$ranges;
  return $total;
}


sub extentmap_merge(@) {
  # Flatten all given range lists (arrayrefs of inclusive [start,end]
  # pairs) and merge overlapping or adjacent regions. Returns a sorted
  # arrayref of merged regions, or undef if called without arguments.
  return undef unless(scalar(@_));
  my @all = sort { $a->[0] <=> $b->[0] } map { @$_ } @_;
  my @out;
  my ($cur_start, $cur_end) = (-1, -2);
  foreach my $r (@all) {
    if($r->[0] > $cur_end + 1) {
      # disjoint: flush the current region (if any), start a new one
      push @out, [ $cur_start, $cur_end ] if($cur_start >= 0);
      ($cur_start, $cur_end) = @$r;
    }
    else {
      # overlapping or adjacent to the current region: extend it
      $cur_end = $r->[1] if($r->[1] > $cur_end);
    }
  }
  push @out, [ $cur_start, $cur_end ] if($cur_start >= 0);
  DEBUG "extentmap: merged " . scalar(@all) . " regions into " . scalar(@out) . " regions";
  return \@out;
}


# ( A \ B ) : data in A that is not in B (relative complement of B in A)
sub extentmap_diff($$)
{
  # Compute A \ B for two sorted lists of inclusive [start,end] ranges,
  # using a single linear sweep over both lists. Returns an arrayref of
  # the resulting ranges.
  my $l = shift // die; # A, sorted
  my $r = shift;        # B, sorted
  return $l unless($r); # A \ 0 = A

  my $i = 0;
  my $rn = scalar(@$r);
  my @diff;

  foreach(@$l) {
    my $l_start = $_->[0];
    my $l_end   = $_->[1];
    while(($i < $rn) && ($r->[$i][1] < $l_start)) { # r_end < l_start
      # advance r to next overlapping
      $i++;
    }
    while(($i < $rn) && ($r->[$i][0] <= $l_end)) { # r_start <= l_end
      # while overlapping, advance l_start
      my $r_start = $r->[$i][0];
      my $r_end   = $r->[$i][1];

      push @diff, [ $l_start, $r_start - 1 ] if($l_start < $r_start);
      $l_start = $r_end + 1;
      last if($l_start > $l_end);
      $i++;
    }
    push @diff, [ $l_start, $l_end ] if($l_start <= $l_end);
  }
  # BUGFIX: the debug message used to print the operands in swapped
  # order ("B \ A"); this function computes A \ B.
  DEBUG "extentmap: relative complement ( A=" . scalar(@$l) . ' \ B=' . scalar(@$r) . " ) = " . scalar(@diff) . " regions";
  return \@diff;
}


sub btr_tree($$$$)
{
  # Build (or retrieve from cache) the parsed btrfs subvolume tree for
  # the filesystem identified by $mount_source, and return the tree
  # node with subvolume id $vol_root_id. Returns undef on error.
  #
  # Side effects: populates %mount_source_cache (keyed by
  # host+mount_source) and %uuid_cache (keyed by subvolume uuid, used
  # to detect that two mount sources refer to the same filesystem).
  my $vol = shift;
  my $vol_root_id = shift || die;
  my $mount_source = shift || die;  # aka device
  my $mountpoints = shift || die; # all known mountpoints for this filesystem: arrayref of mountinfo
  die unless($vol_root_id >= 5);

  # return parsed tree from %mount_source_cache if present
  my $host_mount_source = $vol->{URL_PREFIX} . $mount_source;
  my $cached_tree = $mount_source_cache{$host_mount_source};
  TRACE "mount_source_cache " . ($cached_tree ? "HIT" : "MISS") . ": $host_mount_source" if($do_trace);
  if($cached_tree) {
    TRACE "btr_tree: returning cached tree at id=$vol_root_id" if($do_trace);
    my $node = $cached_tree->{ID_HASH}{$vol_root_id};
    ERROR "Unknown subvolid=$vol_root_id in btrfs tree of $host_mount_source" unless($node);
    return $node;
  }

  my $node_list = btrfs_subvolume_list_complete($vol);
  return undef unless(ref($node_list) eq "ARRAY");
  my $vol_root;

  TRACE "btr_tree: processing subvolume list of: $vol->{PRINT}" if($do_trace);

  # return a reference to the cached root if we already know the tree,
  # making sure every tree is only stored once, which is essential
  # e.g. when injecting nodes. die if duplicate UUID exist on
  # different file systems (no matter if local or remote).
  #
  # note: this relies on subvolume UUID's to be "universally unique"
  # (which is why cloning btrfs filesystems using "dd" is a bad idea)
  #
  # note: a better way would be to always compare the UUID of
  # subvolid=5. unfortunately this is not possible for filesystems
  # created with btrfs-progs < 4.16 (no UUID for subvolid=5).
  foreach(@$node_list) {
    my $node_uuid = $_->{uuid};
    next unless($node_uuid);
    if($uuid_cache{$node_uuid}) {
      # at least one uuid of $node_list is already known
      TRACE "uuid_cache HIT: $node_uuid" if($do_trace);
      $vol_root = $uuid_cache{$node_uuid}->{TREE_ROOT}->{ID_HASH}->{$vol_root_id};
      die "Duplicate UUID on different file systems"  unless($vol_root);
      INFO "Assuming same filesystem: \"$vol_root->{TREE_ROOT}->{host_mount_source}\", \"$host_mount_source\"";
      TRACE "btr_tree: returning already parsed tree at id=$vol_root->{id}" if($do_trace);
      $mount_source_cache{$host_mount_source} = $vol_root->{TREE_ROOT};
      return $vol_root;
    }
    last; # check only first UUID (for performance)
  }

  # fill our hashes and uuid_cache
  my %id;
  my %uuid_hash;
  my %received_uuid_hash;
  my %parent_uuid_hash;
  my $gen_max = 0;
  foreach my $node (@$node_list) {
    my $node_id = $node->{id};
    my $node_uuid = $node->{uuid};
    die unless($node_id >= 5);
    die "duplicate node id" if(exists($id{$node_id}));
    $id{$node_id} = $node;
    if($node_uuid) {
      # NOTE: uuid on btrfs root (id=5) is not always present
      $uuid_hash{$node_uuid} = $node;
      $uuid_cache{$node_uuid} = $node;
      # hacky: if root node has no "uuid", it also has no "received_uuid" and no "gen"
      push(@{$received_uuid_hash{$node->{received_uuid}}}, $node) if($node->{received_uuid} ne '-');
      push(@{$parent_uuid_hash{$node->{parent_uuid}}}, $node) if($node->{parent_uuid} ne '-');
      $gen_max = $node->{gen} if($node->{gen} > $gen_max);
    }
    elsif(not $node->{is_root}) {
      die "missing uuid on subvolume";
    }
    $node->{SUBTREE} = [];
  }
  # attach the lookup hashes to the tree root (subvolid=5)
  my $tree_root = $id{5} // die "missing btrfs root";
  $tree_root->{ID_HASH} = \%id;
  $tree_root->{UUID_HASH} = \%uuid_hash;
  $tree_root->{RECEIVED_UUID_HASH} = \%received_uuid_hash;
  $tree_root->{PARENT_UUID_HASH} = \%parent_uuid_hash;
  $tree_root->{GEN_MAX} = $gen_max;
  $tree_root->{URL_PREFIX} = $vol->{URL_PREFIX}; # hacky, first url prefix for logging

  # NOTE: host_mount_source is NOT dependent on MACHINE_ID:
  # if we return already present tree (see above), the value of
  # host_mount_source will still point to the mount_source of the
  # first machine.
  $tree_root->{mount_source} = $mount_source;
  $tree_root->{host_mount_source} = $host_mount_source; # unique identifier, e.g. "/dev/sda1" or "ssh://hostname[:port]/dev/sda1"

  $vol_root = $id{$vol_root_id};
  unless($vol_root) {
    ERROR "Failed to resolve tree root for subvolid=$vol_root_id: " . ($vol->{PRINT} // $vol->{id});
    return undef;
  }

  # set REL_PATH and tree references (TREE_ROOT, SUBTREE, TOP_LEVEL)
  foreach my $node (@$node_list) {
    unless($node->{is_root}) {
      # note: it is possible that id < top_level, e.g. after restoring
      my $top_level = $id{$node->{top_level}};
      die "missing top_level reference" unless(defined($top_level));

      push(@{$top_level->{SUBTREE}}, $node);
      $node->{TOP_LEVEL} = $top_level;

      # "path" always starts with set REL_PATH
      my $rel_path = $node->{path};
      unless($top_level->{is_root}) {
        die unless($rel_path =~ s/^\Q$top_level->{path}\E\///);
      }
      $node->{REL_PATH} = $rel_path;  # relative to {TOP_LEVEL}->{path}
    }
    $node->{TREE_ROOT} = $tree_root;
    add_btrbk_filename_info($node);
  }

  # add known mountpoints to nodes
  my %mountpoints_hash;
  foreach(@$mountpoints) {
    my $node_id = $_->{MNTOPS}{subvolid};
    my $node = $id{$node_id};
    unless($node) {
      WARN "Unknown subvolid=$node_id (in btrfs tree of $host_mount_source) for mountpoint: $vol->{URL_PREFIX}$_->{mount_point}";
      next;
    }
    $mountpoints_hash{$node_id} = $node;
    push @{$node->{MOUNTINFO}}, $_;  # if present, node is mounted at MOUNTINFO
  }
  $tree_root->{MOUNTED_NODES} = [ (values %mountpoints_hash) ]; # list of mounted nodes

  TRACE "btr_tree: returning tree at id=$vol_root->{id}" if($do_trace);
  VINFO($vol_root, "node") if($loglevel >=4);

  $mount_source_cache{$host_mount_source} = $tree_root;
  return $vol_root;
}


sub btr_tree_inject_node($$$)
{
  # Inject a fake subvolume node below $top_node (used for dry-runs and
  # raw targets). Allocates a unique negative id from the package
  # global $tree_inject_id and a fake uuid, bumps the tree's GEN_MAX,
  # and registers the node in ID_HASH / UUID_HASH / *_UUID_HASH and
  # %uuid_cache like a real subvolume. Returns the new node.
  my $top_node = shift;
  my $detail = shift;    # hashref: must provide parent_uuid, received_uuid, readonly
  my $rel_path = shift;  # path relative to $top_node
  my $subtree = $top_node->{SUBTREE} // die;
  my $tree_root = $top_node->{TREE_ROOT};

  die unless($detail->{parent_uuid} && $detail->{received_uuid} && exists($detail->{readonly}));

  # fake ids are negative, counting down; new node gets a fresh generation
  $tree_inject_id -= 1;
  $tree_root->{GEN_MAX} += 1;

  my $uuid = sprintf("${fake_uuid_prefix}%012u", -($tree_inject_id));
  my $node = {
    %$detail, # make a copy
    TREE_ROOT => $tree_root,
    SUBTREE   => [],
    TOP_LEVEL => $top_node,
    REL_PATH  => $rel_path,
    INJECTED  => 1,
    id        => $tree_inject_id,
    uuid      => $uuid,
    gen       => $tree_root->{GEN_MAX},
    cgen      => $tree_root->{GEN_MAX},
  };
  push(@$subtree, $node);
  $uuid_cache{$uuid} = $node;
  $tree_root->{ID_HASH}->{$tree_inject_id} = $node;
  $tree_root->{UUID_HASH}->{$uuid} = $node;
  push( @{$tree_root->{RECEIVED_UUID_HASH}->{$node->{received_uuid}}}, $node ) if($node->{received_uuid} ne '-');
  push( @{$tree_root->{PARENT_UUID_HASH}->{$node->{parent_uuid}}}, $node ) if($node->{parent_uuid} ne '-');
  return $node;
}


# returns array of { path, mountinfo }
# NOTE: includes subvolumes hidden by other mountpoint
sub __fs_info
{
  my ($node, $url_prefix) = @_;

  # mountpoints of this node itself (empty list if not mounted)
  my @own;
  if($node->{MOUNTINFO}) {
    @own = map { +{ path => $url_prefix . $_->{mount_point}, mountinfo => $_ } } @{$node->{MOUNTINFO}};
  }
  return @own if($node->{is_root});

  # paths reachable through the parent's mountpoints, then our own
  my @via_parent = map { +{ path => $_->{path} . '/' . $node->{REL_PATH}, mountinfo => $_->{mountinfo} } }
                   __fs_info($node->{TOP_LEVEL}, $url_prefix);
  return (@via_parent, @own);
}

sub _fs_info
{
  # Resolve all known filesystem paths of $node via its mountpoints.
  # If the subvolume is not reachable through any mountpoint, a single
  # synthetic "<url_prefix><mount_source>/path" entry with
  # mountinfo=undef is returned instead.
  my $node = shift // die;
  my $url_prefix = shift // $node->{TREE_ROOT}{URL_PREFIX};
  my @found = __fs_info($node, $url_prefix);
  return @found if(scalar(@found));
  return ({ path => "$url_prefix<$node->{TREE_ROOT}{mount_source}>/$node->{path}",
            mountinfo => undef });
}

sub _fs_path
{
  # Path(s) from _fs_info(): full list in list context, first entry in
  # scalar context.
  my @paths = map { $_->{path} } _fs_info(@_);
  return wantarray ? @paths : $paths[0];
}


sub _is_correlated($$)
{
  # True if two subvolumes are correlated send/receive copies: one is
  # the received copy of the other, or both were received from the same
  # source subvolume. Only read-only, non-root subvolumes qualify.
  my $na = shift; # node a
  my $nb = shift; # node b
  return 0 if($na->{is_root} || $nb->{is_root});
  return 0 unless($na->{readonly} && $nb->{readonly});
  return 1 if($na->{uuid} eq $nb->{received_uuid});
  return 1 if($nb->{uuid} eq $na->{received_uuid});
  return 1 if(($na->{received_uuid} ne '-') && ($na->{received_uuid} eq $nb->{received_uuid}));
  return 0;
}


sub _is_same_fs_tree($$)
{
  # True if both nodes live on the same (host, mount_source) filesystem.
  my ($na, $nb) = @_;
  return ($na->{TREE_ROOT}{host_mount_source} eq $nb->{TREE_ROOT}{host_mount_source});
}


sub _is_child_of
{
  # True if any descendant of $node (recursively) has the given uuid.
  my ($node, $uuid) = @_;
  foreach my $child (@{$node->{SUBTREE}}) {
    return 1 if(($child->{uuid} eq $uuid) || _is_child_of($child, $uuid));
  }
  return 0;
}


sub _get_longest_match
{
  # Depth-first search for the deepest subvolume whose path is a prefix
  # of $check_path. Returns { node, path } (path with trailing slash),
  # or undef if $path is not a prefix of $check_path at all.
  my ($node, $path, $check_path) = @_;  # $check_path MUST have a trailing slash
  $path .= '/' unless($path =~ /\/$/);  # correctly handle root path="/"

  return undef unless($check_path =~ /^\Q$path\E/);
  foreach my $child (@{$node->{SUBTREE}}) {
    my $match = _get_longest_match($child, $path . $child->{REL_PATH}, $check_path);
    return $match if($match);
  }
  return { node => $node, path => $path };
}


sub vinfo($$)
{
  # Construct a "vinfo" hash describing a (possibly remote) path, from
  # an url like "/path" or "ssh://host[:port]/path". Dies on malformed
  # urls. $config is attached as-is (for later config_key() lookups).
  my $url = shift // die;
  my $config = shift;

  my ($url_prefix, $path) = check_url($url);
  die "invalid url: $url" unless(defined($path));
  my $print = $path;
  my $name = $path;
  $name =~ s/^.*\///;  # basename of path
  $name = '/' if($name eq "");

  my $host = undef;
  my $port = undef;
  if($url_prefix) {
    # split "ssh://host[:port]" prefix into host and (optional) port
    $host = $url_prefix;
    die unless($host =~ s/^ssh:\/\///);
    $port = $1 if($host =~ s/:([1-9][0-9]*)$//);
    $print = $host . (defined($port) ? "[$port]:" : ":") . $path;
    $host =~ s/^\[//; # remove brackets from ipv6_addr
    $host =~ s/\]$//; # remove brackets from ipv6_addr
  }

  # Note that PATH and URL have no trailing slash, except if "/".
  # Note that URL and URL_PREFIX can contain ipv6 address in brackets (e.g. "[::1]").
  return {
    HOST       => $host,                   # hostname|ipv4_address|ipv6_address|<undef>
    PORT       => $port,                   # port|<undef>
    NAME       => $name,
    PATH       => $path,
    PRINT      => $print,                  # "hostname:/path" or "hostname[port]:/path"
    URL        => $url_prefix . $path,     # ssh://hostname[:port]/path
    URL_PREFIX => $url_prefix,             # ssh://hostname[:port]  (or "" if local)
    MACHINE_ID => $url_prefix || "LOCAL:", # unique: "LOCAL:" or hostname and port
    CONFIG     => $config,
  }
}


sub vinfo_child($$;$)
{
  # Build a vinfo hash for the path $rel_path below $parent, inheriting
  # host/port/url/config from the parent ($config, if given, overrides
  # the parent's CONFIG).
  my $parent = shift || die;
  my $rel_path = shift // die;
  my $config = shift;   # override parent config

  # split "dir/name" into SUBVOL_DIR and NAME
  my ($subvol_dir, $name) = ("", $rel_path);
  $subvol_dir = $1 if($name =~ s/^(.*)\///);

  # avoid a double slash in PRINT if the parent prints as "/"
  my $print_sep = ($parent->{PRINT} =~ /\/$/) ? "" : "/";

  # Note that PATH and URL intentionally contain "//" if $parent->{PATH} = "/".
  return {
    HOST         => $parent->{HOST},
    PORT         => $parent->{PORT},
    NAME         => $name,
    PATH         => "$parent->{PATH}/$rel_path",
    PRINT        => $parent->{PRINT} . $print_sep . $rel_path,
    URL          => "$parent->{URL}/$rel_path",
    URL_PREFIX   => $parent->{URL_PREFIX},
    MACHINE_ID   => $parent->{MACHINE_ID},
    SUBVOL_PATH  => $rel_path,
    SUBVOL_DIR   => $subvol_dir,  # SUBVOL_PATH=SUBVOL_DIR/NAME
    CONFIG       => $config // $parent->{CONFIG},
    VINFO_MOUNTPOINT => $parent->{VINFO_MOUNTPOINT},
  };
}


sub vinfo_rsh($;@)
{
  # Build the ssh command line (arrayref) used to reach $vinfo->{HOST},
  # or undef for local vinfos. Options:
  #   disable_compression => 1 : force "-o compression=no"
  my $vinfo = shift || die;
  my %opts = @_;
  my $host = $vinfo->{HOST};
  return undef unless(defined($host));

  my $config = $vinfo->{CONFIG};
  die unless($config);

  # as of btrbk-0.28.0, ssh port is a property of a "vinfo", set with
  # "ssh://hostname[:port]" in 'volume' and 'target' sections. Note
  # that the port number is also used for the MACHINE_ID to
  # distinguish virtual machines on same host with different ports.
  my $port = $vinfo->{PORT};
  unless($port) {
    # PORT defaults to ssh_port (DEPRECATED)
    my $cfg_port = config_key($config, "ssh_port") // "default";
    $port = ($cfg_port eq "default") ? undef : $cfg_port;
  }

  my $user        = config_key($config, "ssh_user");
  my $identity    = config_key($config, "ssh_identity");
  my $compression = config_key($config, "ssh_compression");
  my $cipher_spec = config_key($config, "ssh_cipher_spec") // "default";

  # as of btrbk-0.29.0, we run ssh without -q (catching @stderr)
  my @options;
  push(@options, '-p', $port) if($port);
  push(@options, '-c', $cipher_spec) unless($cipher_spec eq "default");
  if($identity) {
    push(@options, '-i', $identity);
  } else {
    WARN_ONCE "No SSH identity provided (option ssh_identity is not set) for: " . ($vinfo->{CONFIG}->{url} // $vinfo->{PRINT});
  }
  if($opts{disable_compression}) {
    push(@options, '-o', 'compression=no');  # force ssh compression=no (in case it is defined in ssh_config)
  } elsif($compression) {
    push(@options, '-C');
  }
  return ['ssh', @options, $user . '@' . $host ];
}


sub vinfo_cmd($$@)
{
  # Map an abstract command name to a concrete command line (arrayref),
  # according to the backend configured for $vinfo (remote vs. local,
  # with backend_local_user for non-root local execution).
  my $vinfo = shift || die;
  my $cmd = shift || die;
  my @cmd_args = @_;

  # select the backend, falling back to the generic "backend" option
  my $backend;
  if(defined($vinfo->{HOST})) {
    $backend = config_key($vinfo, "backend_remote");
  } else {
    $backend = config_key($vinfo, "backend_local_user") if($>); # $EUID, $EFFECTIVE_USER_ID
    $backend //= config_key($vinfo, "backend_local");
  }
  $backend //= config_key($vinfo, "backend") // die;

  my $mapped = $backend_cmd_map{$backend}{$cmd};
  if(defined($mapped)) {
    TRACE "vinfo_cmd: found mapping for backend=$backend cmd=\"$cmd\": " . join(' ', @$mapped) if($do_trace);
    return [ @$mapped, @cmd_args ];
  }

  # no mapping: split multi-word command, assume btrfs-progs style
  my @words = split(/\s/, $cmd);
  TRACE "vinfo_cmd: no mapping found for backend=$backend cmd=\"$cmd\", assuming btrfs-progs: " . join(' ', @words) if($do_trace);
  return [ @words, @cmd_args ];
}

sub _get_btrbk_date(@)
{
  # Convert the named capture buffers (%+) of $btrbk_timestamp_match
  # (passed in flattened as a hash) to [ epoch_time, NN, has_exact_time ].
  # Returns undef on invalid timestamps or unknown time offsets.
  #
  # BUGFIX: the previous implementation declared %bts but kept reading
  # the global %+ directly; that only worked because all callers invoke
  # this immediately after a successful match. Consistently use the
  # %bts copy passed as argument.
  my %bts = @_; # named capture buffers (%+) from $btrbk_timestamp_match

  my @tm = ( ($bts{ss} // 0), ($bts{mm} // 0), ($bts{hh} // 0), $bts{DD}, ($bts{MM} - 1), ($bts{YYYY} - 1900) );
  my $NN = $bts{NN} // 0;
  my $zz = $bts{zz};
  my $has_exact_time = defined($bts{hh}); # false if timestamp_format=short

  # interpret as UTC if a time offset is present (long-iso), local time otherwise
  my $time;
  if(defined($zz)) {
    eval_quiet { $time = timegm(@tm); };
  } else {
    eval_quiet { $time = timelocal(@tm); };
  }
  unless(defined($time)) {
    # WARN "$@"; # sadly Time::Local croaks, which also prints the line number from here.
    return undef;
  }

  # handle ISO 8601 time offset ("Z", or "+hhmm" / "-hhmm")
  if(defined($zz)) {
    my $offset;
    if($zz eq 'Z') {
      $offset = 0;  # Zulu time == UTC
    }
    elsif($zz =~ /^([+-])([0-9][0-9])([0-9][0-9])$/) {
      $offset = ( $3 * 60 ) + ( $2 * 60 * 60 );
      $offset *= -1 if($1 eq '-');
    }
    else {
      return undef;
    }
    $time -= $offset;
  }

  return [ $time, $NN, $has_exact_time ];
}

sub add_btrbk_filename_info($;$)
{
  # Parse the btrbk naming scheme ("<basename>.<timestamp>[_NN]", plus
  # raw target postfix if $raw_info is given) from the node's REL_PATH
  # basename, and attach BTRBK_BASENAME / BTRBK_DATE (and BTRBK_RAW for
  # raw targets) to the node. Returns the node on success, undef if the
  # name does not match.
  my $node = shift;
  my $raw_info = shift;  # if set, expect a raw target postfix on the filename
  my $name = $node->{REL_PATH};
  return undef unless(defined($name));

  # NOTE: unless long-iso file format is encountered, the timestamp is interpreted in local timezone.

  $name =~ s/^(.*)\///;  # basename
  # each successful match below fills %+, consumed by _get_btrbk_date()
  if($raw_info && ($name =~ /^(?<name>$file_match)\.$btrbk_timestamp_match$raw_postfix_match$/)) { ; }
  elsif($raw_info && $name =~ /^(?<name>$file_match)\.$btrbk_timestamp_match$raw_postfix_match_DEPRECATED$/) { ; } # DEPRECATED raw format
  elsif((not $raw_info) && ($name =~ /^(?<name>$file_match)\.$btrbk_timestamp_match$/)) { ; }
  else {
    return undef;
  }
  $name = $+{name} // die;
  my $btrbk_date = _get_btrbk_date(%+); # use named capture buffers of previous match
  unless($btrbk_date) {
    WARN "Illegal timestamp on subvolume \"$node->{REL_PATH}\", ignoring";
    return undef;
  }

  $node->{BTRBK_BASENAME} = $name;
  $node->{BTRBK_DATE} = $btrbk_date;
  $node->{BTRBK_RAW} = $raw_info if($raw_info);
  return $node;
}


sub vinfo_init_root($)
{
  # Resolve the btrfs subvolume tree for $vol (a vinfo created by
  # vinfo()) and attach it: sets $vol->{node}, $vol->{NODE_SUBDIR} (if
  # PATH points into a plain directory inside a subvolume), and
  # $vol->{VINFO_MOUNTPOINT}. Returns the resolved tree node, or undef
  # on errors.
  my $vol = shift || die;

  # resolve btrfs tree from mount point
  @stderr = (); # clear @stderr (propagated for logging)
  my ($mnt_path, $real_path, $subvolid, $mount_source, $mountpoints) = btrfs_mountpoint($vol);
  return undef unless($mnt_path && $real_path && $subvolid);

  # read btrfs tree for the mount point
  @stderr = (); # clear @stderr (propagated for logging)
  my $mnt_vol = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
  my $mnt_tree_root = btr_tree($mnt_vol, $subvolid, $mount_source, $mountpoints);
  return undef unless($mnt_tree_root);

  # find longest match in btrfs tree
  my $ret = _get_longest_match($mnt_tree_root, $mnt_path, $real_path) // die;
  my $tree_root = $ret->{node};
  return undef unless($tree_root);

  # set NODE_SUBDIR if $vol->{PATH} points to a regular (non-subvolume) directory.
  # in other words, "PATH=<path_to_subvolume>/NODE_SUBDIR"
  my $node_subdir = $real_path;
  die unless($node_subdir =~ s/^\Q$ret->{path}\E//);  # NOTE: $ret->{path} has trailing slash!
  $node_subdir =~ s/\/+$//;
  $vol->{NODE_SUBDIR} = $node_subdir if($node_subdir ne '');
  $vol->{node} = $tree_root;

  $vol->{VINFO_MOUNTPOINT} = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
  $vol->{VINFO_MOUNTPOINT}{node} = $mnt_tree_root;

  return $tree_root;
}


sub vinfo_init_raw_root($;@)
{
  # Build (or fetch from %raw_url_cache) a fake btrfs tree for a raw
  # target directory $droot: each raw backup file (described by its
  # sidecar ".info" file) becomes an injected node, with parent/child
  # relations reconstructed from RECEIVED_PARENT_UUID so that
  # incremental backup chains are never broken by cleanup.
  # Returns the fake tree root, or undef on errors.
  my $droot = shift || die;
  my $tree_root = $raw_url_cache{$droot->{URL}};
  TRACE "raw_url_cache " . ($tree_root ? "HIT" : "MISS") . ": URL=$droot->{URL}" if($do_trace);
  unless($tree_root) {
    # also try the resolved (realpath) url before re-reading the directory
    if(my $real_path = $realpath_cache{$droot->{URL}}) {
      my $real_url = $droot->{URL_PREFIX} . $real_path;
      $tree_root = $raw_url_cache{$real_url};
      TRACE "raw_url_cache " . ($tree_root ? "HIT" : "MISS") . ": REAL_URL=$real_url" if($do_trace);
    }
  }

  unless($tree_root) {
    DEBUG "Creating raw subvolume list: $droot->{PRINT}";

    # create fake btr_tree
    $tree_root = { id => 5,
                   is_root => 1,
                   mount_source => '@raw_tree',                      # for _fs_path (logging)
                   host_mount_source => $droot->{URL} . '@raw_tree', # for completeness (this is never used)
                   GEN_MAX => 1,
                   SUBTREE => [],
                   UUID_HASH => {},
                   RECEIVED_UUID_HASH => {},
                   URL_PREFIX => $droot->{URL_PREFIX},                 # for _fs_path (logging)
                   MOUNTINFO => [ { mount_point => $droot->{PATH} } ], # for _fs_path (logging)
                 };
    $tree_root->{TREE_ROOT} = $tree_root;

    # list and parse *.info
    my $raw_info_ary = system_read_raw_info_dir($droot);
    return undef unless($raw_info_ary);

    # inject nodes to fake btr_tree
    $droot->{node} = $tree_root;
    my %child_uuid_list;  # RECEIVED_PARENT_UUID -> [ child vinfos ]
    foreach my $raw_info (@$raw_info_ary)
    {
      # Set btrfs subvolume information (received_uuid, parent_uuid) from filename info.
      #
      # NOTE: received_parent_uuid in BTRBK_RAW is the "parent of the source subvolume", NOT the
      #       "parent of the received subvolume".
      my $subvol = vinfo_child($droot, $raw_info->{FILE});
      unless(vinfo_inject_child($droot, $subvol, {
        TARGET_TYPE   => $raw_info->{TYPE},
        parent_uuid   => '-', # NOTE: correct value gets inserted below
        # Incomplete raw fakes get same semantics as real subvolumes (readonly=0, received_uuid='-')
        received_uuid => ($raw_info->{INCOMPLETE} ? '-' : $raw_info->{RECEIVED_UUID}),
        readonly      => ($raw_info->{INCOMPLETE} ? 0 : 1),
      }, $raw_info))
      {
        if($raw_info->{INFO_FILE}) {
          ERROR("Ambiguous \"FILE=\" in raw info file: \"$raw_info->{INFO_FILE}\"");
        } else {
          # DEPRECATED raw format
          ERROR("Ambiguous file: \"$raw_info->{FILE}\"");
        }
        return undef;
      }

      if($raw_info->{RECEIVED_PARENT_UUID} ne '-') {
        $child_uuid_list{$raw_info->{RECEIVED_PARENT_UUID}} //= [];
        push @{$child_uuid_list{$raw_info->{RECEIVED_PARENT_UUID}}}, $subvol;
      }
    }

    my @subvol_list = @{vinfo_subvol_list($droot, sort => 'path')};
    DEBUG "Found " . scalar(@subvol_list) . " raw subvolume backups in: $droot->{PRINT}";

    foreach my $subvol (@subvol_list)
    {
      # If restoring a backup from raw btrfs images (using "incremental yes|strict"):
      # "btrfs send -p parent source > svol.btrfs", the backups
      # on the target will get corrupted (unusable!) as soon as
      # an any files in the chain gets deleted.
      #
      # We need to make sure btrbk will NEVER delete those:
      # - svol.<timestamp>--<received_uuid_0>.btrfs                        : root (full) image
      # - svol.<timestamp>--<received_uuid-n>[@<received_uuid_n-1>].btrfs  : incremental image

      foreach my $child (@{$child_uuid_list{$subvol->{node}{received_uuid}}}) {
        # Insert correct (i.e. fake) parent UUID
        $child->{node}{parent_uuid} = $subvol->{node}{uuid};

        # Make sure that incremental backup chains are never broken:
        DEBUG "Found parent/child partners, forcing preserve of: \"$subvol->{PRINT}\", \"$child->{PRINT}\"";
        $subvol->{node}{FORCE_PRESERVE}  = "preserve forced: parent of another raw target";
        $child->{node}{FORCE_PRESERVE} ||= "preserve forced: child of another raw target";
      }
    }
    # TRACE(Data::Dumper->Dump([\@subvol_list], ["vinfo_raw_subvol_list{$droot}"]));
  }

  $droot->{node} = $tree_root;
  $droot->{VINFO_MOUNTPOINT} = $droot; # fake mountpoint
  $raw_url_cache{$droot->{URL}} = $tree_root;

  return $tree_root;
}


sub _vinfo_subtree_list
{
  # Recursive worker for vinfo_subvol_list(): collect vinfo children of
  # $vinfo_parent for all nodes below $tree.
  #
  #   $filter_readonly          - if set, return only read-only subvolumes
  #   $filter_btrbk_direct_leaf - if set, return only read-only direct
  #                               leafs matching this BTRBK_BASENAME
  #                               (implies no recursion)
  #   $list, $path_prefix, $depth - recursion state (internal use)
  #
  # Returns $list (arrayref of vinfo).
  my $tree = shift;
  my $vinfo_parent = shift;
  my $filter_readonly = shift; # if set, return only read-only
  my $filter_btrbk_direct_leaf = shift; # if set, return only read-only direct leafs matching btrbk_basename
  my $list = shift // [];
  my $path_prefix = shift // "";
  my $depth = shift // 0;

  # if $vinfo_parent->{NODE_SUBDIR} is set, vinfo_parent->{PATH} does
  # not point to a subvolume directly, but to "<path_to_subvolume>/NODE_SUBDIR".
  # skip nodes wich are not in NODE_SUBDIR, or strip NODE_SUBDIR from from rel_path.
  my $node_subdir_filter = ($depth == 0) ? $vinfo_parent->{NODE_SUBDIR} : undef;
  foreach my $node (@{$tree->{SUBTREE}}) {
    my $rel_path = $node->{REL_PATH};
    if(defined($node_subdir_filter)) {
      next unless($rel_path =~ s/^\Q$node_subdir_filter\E\///);
    }
    my $path = $path_prefix . $rel_path; # always points to a subvolume

    # filter direct leafs (SUBVOL_DIR="") matching btrbk_basename
    next unless(!defined($filter_btrbk_direct_leaf) ||
                (exists($node->{BTRBK_BASENAME}) && ($node->{BTRBK_BASENAME} eq $filter_btrbk_direct_leaf) &&
                 ($rel_path !~ /\//))); # note: depth is always 0 if $filter_btrbk_direct_leaf

    # filter readonly, push vinfo_child
    if(!$filter_readonly || $node->{readonly}) {
      my $vinfo = vinfo_child($vinfo_parent, $path);
      $vinfo->{node} = $node;

      # add some additional information to vinfo
      $vinfo->{subtree_depth} = $depth;

      push(@$list, $vinfo);
    }

    # recurse into subtree (unless filtering for direct leafs only)
    unless(defined($filter_btrbk_direct_leaf)) {
      _vinfo_subtree_list($node, $vinfo_parent, $filter_readonly, undef, $list, $path . '/', $depth + 1);
    }
  }
  return $list;
}


# Returns an arrayref of vinfo for all subvolumes below $vol.
# Options:
#   readonly          => bool  - include only read-only subvolumes
#   btrbk_direct_leaf => $name - include only direct leafs matching btrbk basename
#   sort              => 'path' - sort results by SUBVOL_PATH
sub vinfo_subvol_list($;@)
{
  my $vol = shift || die;
  my %opts = @_;

  TRACE "Creating subvolume list for: $vol->{PRINT}" if($do_trace);

  # walk the node tree below $vol, collecting vinfo children
  my $subvols = _vinfo_subtree_list($vol->{node}, $vol, $opts{readonly}, $opts{btrbk_direct_leaf});

  my $sort_key = $opts{sort};
  return $subvols unless($sort_key);
  die unless($sort_key eq 'path');  # only sorting by path is supported
  return [ sort { $a->{SUBVOL_PATH} cmp $b->{SUBVOL_PATH} } @$subvols ];
}


# Returns a vinfo child of $vol if $node is in the tree below $vol
# (or $vol itself if option "allow_equal" is set); returns undef if
# $node is not reachable from $vol.
# Options:
#   allow_equal       => bool  - return $vol if $node is $vol's own node
#   btrbk_direct_leaf => $name - only accept direct leafs with matching btrbk basename
sub vinfo_resolved($$;@)
{
  my $node = shift || die;
  my $vol = shift || die;  # root vinfo node
  my %opts = @_;
  my $top_id = $vol->{node}{id};
  my @path;
  my $nn = $node;
  # climb the TOP_LEVEL chain from $node up to $vol (or the filesystem
  # root), collecting relative path components on the way
  while(($nn->{id} != $top_id) && (!$nn->{is_root})) {
    unshift(@path, $nn->{REL_PATH});
    $nn = $nn->{TOP_LEVEL};
  }
  if(scalar(@path) == 0) {
    # $node is $vol's own node
    return $vol if($opts{allow_equal} && not defined($vol->{NODE_SUBDIR}));
    return undef;
  }
  # reached the filesystem root without passing $vol: not below $vol
  return undef if($nn->{is_root} && (!$vol->{node}{is_root}));
  my $jpath = join('/', @path);
  if(defined($vol->{NODE_SUBDIR})) {
    # $vol points into a subdirectory of its node: strip the prefix
    return undef unless($jpath =~ s/^\Q$vol->{NODE_SUBDIR}\E\///);
  }
  if(defined($opts{btrbk_direct_leaf})) {
    return undef if($jpath =~ /\//);  # not a direct leaf
    return undef unless(exists($node->{BTRBK_BASENAME}) && ($node->{BTRBK_BASENAME} eq $opts{btrbk_direct_leaf}))
  }
  my $vinfo = vinfo_child($vol, $jpath);
  $vinfo->{node} = $node;
  return $vinfo;
}


# Try to resolve $node against every known mountpoint of the
# filesystem $vol lives on; returns the first matching vinfo, or
# undef if $node is not below any mountpoint.
sub vinfo_resolved_all_mountpoints($$)
{
  my $node = shift || die;
  my $vol = shift || die;
  my $tree_root = $vol->{node}{TREE_ROOT};
  foreach my $mnt_node (@{$tree_root->{MOUNTED_NODES}}) {
    foreach my $mountinfo (@{$mnt_node->{MOUNTINFO}}) {
      # build a vinfo rooted at this mountpoint and try to resolve
      my $mnt_vol = vinfo($vol->{URL_PREFIX} . $mountinfo->{mount_point}, $vol->{CONFIG});
      $mnt_vol->{node} = $mnt_node;
      TRACE "vinfo_resolved_all_mountpoints: trying mountpoint: $mnt_vol->{PRINT}" if($do_trace);
      my $resolved = vinfo_resolved($node, $mnt_vol, allow_equal => 1);
      return $resolved if($resolved);
    }
  }
  return undef;
}

# Find the subvolume of $vol whose SUBVOL_PATH equals $subvol_path.
# Returns the matching vinfo, or undef if no such subvolume exists.
sub vinfo_subvol($$)
{
  my $vol = shift || die;
  my $subvol_path = shift // die;
  my $subvols = vinfo_subvol_list($vol);
  foreach my $subvol (@$subvols) {
    return $subvol if($subvol->{SUBVOL_PATH} eq $subvol_path);
  }
  return undef;
}


# True if $vol is a read-only btrbk snapshot with basename
# $btrbk_basename, located directly in $subvol_dir (defaults to "").
sub vinfo_is_btrbk_snapshot($$;$)
{
  my $vol = shift || die;
  my $btrbk_basename = shift || die;
  my $subvol_dir = shift // "";
  my $node = $vol->{node};
  return ($node->{readonly}
          && defined($node->{BTRBK_BASENAME})
          && ($vol->{SUBVOL_DIR} eq $subvol_dir)
          && ($node->{BTRBK_BASENAME} eq $btrbk_basename));
}


# Inject a fake node (with subvolume details $detail and optional raw
# file info $raw_info) for $vinfo_child into the tree below
# $vinfo->{node}. Returns $vinfo_child with the new node attached, or
# undef if the btrbk filename info could not be parsed.
sub vinfo_inject_child($$$;$)
{
  my $vinfo = shift;
  my $vinfo_child = shift;
  my $detail = shift;
  my $raw_info = shift;

  # node paths are relative to the subvolume: honor NODE_SUBDIR
  my $subdir_prefix = defined($vinfo->{NODE_SUBDIR}) ? $vinfo->{NODE_SUBDIR} . '/' : "";
  my $node = btr_tree_inject_node($vinfo->{node}, $detail, $subdir_prefix . $vinfo_child->{SUBVOL_PATH});
  return undef unless(add_btrbk_filename_info($node, $raw_info));

  $vinfo_child->{node} = $node;
  TRACE "vinfo_inject_child: injected child id=$node->{id} to $vinfo->{PRINT}" if($do_trace);
  return $vinfo_child;
}


# Returns a flat hash mapping "<prefix>_{url,path,host,port,name,
# subvolume,subvolume_path,rsh}" to the corresponding values of
# $vinfo. If $prefix is non-empty, also maps "<prefix>" to the
# printable path. Returns the empty list if $vinfo is undefined.
sub vinfo_prefixed_keys($$)
{
  my $prefix = shift // die;
  my $vinfo = shift;
  return () unless($vinfo);
  my %ret;
  if($prefix) {
    $ret{$prefix} = $vinfo->{PRINT};
    $prefix .= '_';
  }
  foreach (qw( URL PATH HOST PORT NAME )) {
    $ret{$prefix . lc($_)} = $vinfo->{$_};
  }
  $ret{$prefix . "subvolume"} = $vinfo->{PATH};
  $ret{$prefix . "subvolume_path"} = $vinfo->{SUBVOL_PATH};
  my $rsh = vinfo_rsh($vinfo);
  # fix: statement was terminated by "," (comma operator) instead of ";"
  $ret{$prefix . "rsh"} = ($rsh ? join(" ", @$rsh) : undef);
  return %ret;
}


# Attach $vinfo (and optionally its snapshot root) to its config
# section, establishing the back reference checked elsewhere.
# Dies if the config section already has a vinfo assigned.
sub vinfo_assign_config($;$)
{
  my $vinfo = shift || die;
  my $snaproot = shift;
  my $config = $vinfo->{CONFIG} || die;
  die if($config->{VINFO});  # refuse double assignment
  $config->{VINFO} = $vinfo;
  $config->{VINFO_SNAPROOT} = $snaproot;
}


# Returns the snapshot root vinfo assigned to $vinfo's config section
# (set by vinfo_assign_config()), or undef if none was assigned.
sub vinfo_snapshot_root($)
{
  my $vinfo = shift;
  return $vinfo->{CONFIG}{VINFO_SNAPROOT};
}


# Returns the subsection vinfos (in declaration order) of a config
# section, or of a vinfo's config, filtered by $context. Aborted
# subsections are skipped unless $include_aborted is set.
# Performs paranoid consistency checks on all back references.
sub vinfo_subsection($$;$)
{
  # accepts either a config (has SUBSECTION key) or a vinfo (has CONFIG key)
  my $config_or_vinfo = shift || die;
  my $context = shift || die;
  my $include_aborted = shift;

  my $subsections;
  if(exists($config_or_vinfo->{SUBSECTION})) {
    # config
    $subsections = $config_or_vinfo->{SUBSECTION};
  }
  else {
    # vinfo
    my $config = $config_or_vinfo->{CONFIG};
    $subsections = $config->{SUBSECTION};
    die unless($config->{VINFO} == $config_or_vinfo);  # check back reference
  }

  # for now be paranoid and check all contexts
  my @vinfos;
  foreach my $section (@$subsections) {
    die unless($section->{CONTEXT} eq $context);
    next if($section->{ABORTED} && (not $include_aborted));
    my $vinfo = $section->{VINFO};
    die unless($vinfo);
    die unless($vinfo->{CONFIG});
    die unless($vinfo == $vinfo->{CONFIG}->{VINFO});  # check all back references
    push @vinfos, $vinfo;
  }
  return @vinfos;
}


# Parse a single filter statement (e.g. from the command line),
# accepting:
#  - (absolute) path or url, with wildcards
#  - group name (exact match)
#  - host[:port] (exact match)
# Returns a hashref with keys: unparsed, and any of url_regex,
# group_eq, host_port_eq; or undef if nothing could be parsed.
sub vinfo_filter_statement($) {
  my $filter = shift;
  my %ret = ( unparsed => $filter );

  my ($url_prefix, $path) = check_url($filter, accept_wildcards => 1);
  unless($path) {
    # allow relative path with wildcards
    $url_prefix = "";
    $path = check_file($filter, { relative => 1, wildcards => 1 }, sanitize => 1);
  }
  if($path) {
    # support "*some*file*", "*/*": each "*" run matches any non-slash span
    my $regex = join('[^\/]*', map(quotemeta($_), split(/\*+/, lc($url_prefix) . $path, -1)));
    if($path =~ /^\//) {
      $ret{url_regex} = qr/^$regex$/;    # absolute path, match full string
    } else {
      $ret{url_regex} = qr/\/$regex$/;   # match end of string
    }
  }

  # a filter may be ambiguous: it can match a group AND a path/host
  $ret{group_eq} = $filter if($filter =~ /^$group_match$/);

  if($filter =~ /^(?<host>$host_name_match|$ipv4_addr_match|\[$ipv6_addr_match\])(:(?<port>[1-9][0-9]*))?$/) {
    my ($host, $port) = ( $+{host}, $+{port} );
    $host =~ s/^\[//; # remove brackets from ipv6_addr
    $host =~ s/\]$//; # remove brackets from ipv6_addr
    $ret{host_port_eq} = { host => $host, port => $port };
  }
  elsif($filter =~ /^$ipv6_addr_match$/) {
    # bare (bracketless) ipv6 address, no port possible
    $ret{host_port_eq} = { host => $filter } ;
  }

  TRACE 'vinfo_filter_statement: filter="' . $filter . '" url_regex="' . ($ret{url_regex} // "<undef>") . '" group_eq="' . ($ret{group_eq} // "<undef>") . '" host_port_eq="' . ($ret{host_port_eq} ? $ret{host_port_eq}{host} . ":" . ($ret{host_port_eq}{port} // "<undef>") : "<undef>") . '"' if($do_trace);
  return undef unless(exists($ret{url_regex}) || exists($ret{group_eq}) || exists($ret{host_port_eq}));
  return \%ret;
}


# Match $vinfo against the list of parsed filter statements $filter
# (arrayref, see vinfo_filter_statement()).
# Without option "flag_matched": returns the first matching filter
# statement (hashref), or 0 if none matched.
# With flag_matched => $key: sets $ff->{$key}=1 on every matching
# statement and returns the total number of matches.
sub vinfo_match($$;@)
{
  my $filter = shift;
  my $vinfo = shift;
  my %opts = @_;
  my $flag_matched = $opts{flag_matched};

  # match URL against sane path (can contain "//", see vinfo_child),
  # no wildcards
  my ($url_prefix, $path) = check_url($vinfo->{URL});
  my $url = defined($path) ? lc($url_prefix) . $path : undef;
  my $count = 0;
  foreach my $ff (@$filter) {
    # a filter statement can match by group, url and host (all counted)
    if(defined($ff->{group_eq}) && (grep { $ff->{group_eq} eq $_ } @{$vinfo->{CONFIG}{group}})) {
      TRACE "filter \"$ff->{unparsed}\" equals $vinfo->{CONFIG}{CONTEXT} group: $vinfo->{PRINT}" if($do_trace);
      return $ff unless($flag_matched);
      #push @{$ff->{$flag_matched}}, 'group=' . $ff->{group_eq};
      $ff->{$flag_matched} = 1;
      $count++;
    }
    if(defined($ff->{url_regex}) && defined($url) && ($url =~ /$ff->{url_regex}/)) {
      TRACE "filter \"$ff->{unparsed}\" matches $vinfo->{CONFIG}{CONTEXT} url: $vinfo->{PRINT}" if($do_trace);
      return $ff unless($flag_matched);
      #push @{$ff->{$flag_matched}}, $vinfo->{CONFIG}{CONTEXT} . '=' . $vinfo->{PRINT};
      $ff->{$flag_matched} = 1;
      $count++;
    }
    if(defined($ff->{host_port_eq}) && defined($vinfo->{HOST})) {
      my $host = $ff->{host_port_eq}{host};
      my $port = $ff->{host_port_eq}{port};
      # host matches case-insensitive; port must match only if the filter specifies one
      if((lc($host) eq lc($vinfo->{HOST})) &&
         (!defined($port) || (defined($vinfo->{PORT}) && ($port == $vinfo->{PORT}))))
      {
        TRACE "filter \"$ff->{unparsed}\" matches $vinfo->{CONFIG}{CONTEXT} host: $vinfo->{PRINT}" if($do_trace);
        return $ff unless($flag_matched);
        #push @{$ff->{$flag_matched}}, $vinfo->{CONFIG}{CONTEXT} . '=' . $vinfo->{PRINT};
        $ff->{$flag_matched} = 1;
        $count++;
      }
    }
  }
  return $count;
}


# Returns all read-only snapshots related to $svol (by parent_uuid
# relationship) which are resolvable below $snaproot. If
# $btrbk_basename is given, only direct leafs matching it qualify.
sub get_related_snapshots($$;$)
{
  my $snaproot = shift || die;
  my $svol = shift // die;
  my $btrbk_basename = shift; # if set, also filter by direct_leaf
  my @related = _related_nodes($svol->{node}, readonly => 1, omit_self => 1);
  # drop nodes not resolvable below $snaproot
  my @ret = map { vinfo_resolved($_, $snaproot, btrbk_direct_leaf => $btrbk_basename) // () } @related;

  if($do_trace) { TRACE "get_related_snapshots: found: $_->{PRINT}" foreach(@ret); }
  DEBUG "Found " . scalar(@ret) . " related snapshots of \"$svol->{PRINT}\" in: $snaproot->{PRINT}" . (defined($btrbk_basename) ? "/$btrbk_basename.*" : "");
  return @ret;
}


# Returns all read-only nodes on the target filesystem (of $dnode)
# which are correlated to source node $snode, i.e. whose uuid or
# received_uuid matches $snode's uuid or received_uuid.
# Returns the empty list if $snode is the btrfs root or not read-only.
sub _correlated_nodes($$)
{
  my $dnode = shift || die; # any node on target filesystem
  my $snode = shift || die;
  my @ret;

  if($snode->{is_root}) {
    TRACE "Skip search for correlated targets: source subvolume is btrfs root: " . _fs_path($snode) if($do_trace);
    return @ret;
  }
  unless($snode->{readonly}) {
    TRACE "Skip search for correlated targets: source subvolume is not read-only: " . _fs_path($snode) if($do_trace);
    return @ret;
  }

  # find matches by comparing uuid / received_uuid
  my $uuid = $snode->{uuid};
  my $received_uuid = $snode->{received_uuid};
  $received_uuid = undef if($received_uuid eq '-');

  # RECEIVED_UUID_HASH maps received_uuid -> [ nodes ] (not unique),
  # UUID_HASH maps uuid -> node (unique)
  my $received_uuid_hash = $dnode->{TREE_ROOT}{RECEIVED_UUID_HASH};
  my $uuid_hash = $dnode->{TREE_ROOT}{UUID_HASH};

  # match uuid/received_uuid combinations
  my @match;
  push(@match, @{ $received_uuid_hash->{$uuid} // [] });            # match src.uuid == target.received_uuid
  if($received_uuid) {
    # fix: guard with "// ()", don't push undef on missing uuid
    push(@match, $uuid_hash->{$received_uuid} // ());               # match src.received_uuid == target.uuid
    push(@match, @{ $received_uuid_hash->{$received_uuid} // [] }); # match src.received_uuid == target.received_uuid
  }

  # only read-only subvolumes qualify as correlated targets
  @ret = grep($_->{readonly}, @match);
  TRACE "correlated_nodes: dst=\"" . _fs_path($dnode) . "\", src=\"" . _fs_path($snode) . "\": [" . join(", ", map _fs_path($_),@ret) . "]" if($do_trace);
  return @ret;
}


# Returns array of vinfo of receive targets below $droot which are
# correlated to $src_vol (by uuid/received_uuid).
# Options:
#   exact          => bool   - only accept targets whose path matches $src_vol's name
#   warn           => bool   - warn about targets found at unknown locations
#   ret_unexpected => \$flag - set to 1 if an unexpected match was skipped
sub get_receive_targets($$;@)
{
  my $droot = shift || die;
  my $src_vol = shift || die;
  my %opts = @_;
  my @ret;

  my @correlated = _correlated_nodes($droot->{node}, $src_vol->{node});
  foreach (@correlated) {
    my $vinfo = vinfo_resolved($_, $droot); # returns undef if not below $droot
    if(exists($_->{BTRBK_RAW})) {
      # NOTE(review): $vinfo can be undef here (raw target not below
      # $droot), and is pushed to @ret below - confirm callers handle this
      TRACE "get_receive_targets: found raw receive target: " . _fs_path($_) if($do_trace);
    }
    elsif($vinfo && ($vinfo->{SUBVOL_PATH} eq $src_vol->{NAME})) { # direct leaf, (SUBVOL_DIR = "", matching NAME)
      TRACE "get_receive_targets: found receive target (exact-match): $vinfo->{PRINT}" if($do_trace);
    }
    elsif($vinfo && (not $opts{exact})) {
      TRACE "get_receive_targets: found receive target (non-exact-match): $vinfo->{PRINT}" if($do_trace);
    }
    else {
      # correlated subvolume exists, but at an unexpected location
      TRACE "get_receive_targets: skip unexpected match: " . _fs_path($_) if($do_trace);
      ${$opts{ret_unexpected}} = 1 if($opts{ret_unexpected});
      if($opts{warn} && config_key($droot, "warn_unknown_targets")) {
        WARN "Receive target of \"$src_vol->{PRINT}\" exists at unknown location: " . ($vinfo ? $vinfo->{PRINT} : _fs_path($_));
      }
      next;
    }
    push(@ret, $vinfo);
  }
  return @ret;
}


# Returns [ $src_vol, $target_vinfo ] for the first correlated node of
# $src_vol which is resolvable within $droot (independent of btrbk
# name), or - with option "fallback_all_mountpoints" - within any
# mountpoint of the target filesystem. Returns undef if no accessible
# target exists; in this case all correlated nodes are appended to the
# array referenced by option "push_inaccessible_nodes" (if given).
sub get_best_correlated($$;@)
{
  my $droot = shift || die;
  my $src_vol = shift || die;
  my %opts = @_;

  my @candidates = _correlated_nodes($droot->{node}, $src_vol->{node}); # all matching src_vol, from droot->TREE_ROOT
  foreach my $cnode (@candidates) {
    my $resolved = vinfo_resolved($cnode, $droot); # resolved within $droot
    return [ $src_vol, $resolved ] if($resolved);
  }
  if($opts{fallback_all_mountpoints}) {
    foreach my $cnode (@candidates) {
      my $resolved = vinfo_resolved_all_mountpoints($cnode, $droot); # resolved within any mountpoint
      return [ $src_vol, $resolved ] if($resolved);
    }
  }
  if(my $inaccessible = $opts{push_inaccessible_nodes}) {
    push @$inaccessible, @candidates;
  }
  return undef;
}


# Returns all nodes related to $snode by parent_uuid relationship
# (climbing up the parent chain, then collecting all descendants),
# unsorted. The search depth is limited (4096) in both directions to
# guard against degenerate/looping relation chains.
# Options:
#   readonly  => bool - only return read-only nodes
#   omit_self => bool - exclude $snode itself from the results
#   nowarn    => bool - suppress warnings when the depth limit is hit
sub _related_nodes($;@)
{
  my $snode = shift // die;
  my %opts = @_;
  TRACE "related_nodes: resolving related subvolumes of: " . _fs_path($snode) if($do_trace);

  # iterate parent chain
  my @related_nodes;
  my $uuid_hash = $snode->{TREE_ROOT}{UUID_HASH};
  my $parent_uuid_hash = $snode->{TREE_ROOT}{PARENT_UUID_HASH};
  my $node = $snode;
  my $uuid = $node->{uuid};
  my $abort_distance = 4096;

  # climb up parent chain
  my $distance = 0;  # parent distance
  while(($distance < $abort_distance) && defined($node) && ($node->{parent_uuid} ne "-")) {
    $uuid = $node->{parent_uuid};
    $node = $uuid_hash->{$uuid};
    TRACE "related_nodes: d=$distance uuid=$uuid : parent: " . ($node ? _fs_path($node) : "<deleted>") if($do_trace);
    $distance++;
  }
  if($distance >= $abort_distance) {
    my $logmsg = "Parent UUID chain exceeds depth=$abort_distance, ignoring related parents of uuid=$uuid for: " . _fs_path($snode);
    DEBUG $logmsg;
    WARN_ONCE $logmsg unless($opts{nowarn});
  }
  TRACE "related_nodes: d=$distance uuid=$uuid : top of parent chain" if($do_trace);

  # walk down again from the top of the parent chain, collecting all
  # children breadth-first (@nn is the work queue, interleaved with
  # MARK_* hashes restoring the correct distance per generation).
  # push related children even if parent node is missing -> siblings.
  # fix: removed no-op self-assignment "$abort_distance = $abort_distance;"
  my @nn;
  $distance = $distance * (-1);  # child distance (from top parent)
  while($uuid) {
    push @related_nodes, $node if($node && (!$opts{readonly} || $node->{readonly}));
    my $children = $parent_uuid_hash->{$uuid};
    if($children) {
      if($distance >= $abort_distance) {
        my $logmsg = "Parent/child relations exceed depth=$abort_distance, ignoring related children of uuid=$uuid for: " . _fs_path($snode);
        DEBUG $logmsg;
        WARN_ONCE $logmsg unless($opts{nowarn});
      } else {
        push @nn, { MARK_UUID => $uuid, MARK_DISTANCE => ($distance + 1) }, @$children;
      }
    }

    if($do_trace) {
      if($node) {
        if($node->{readonly}) {
          TRACE "related_nodes: d=$distance uuid=$uuid : push related readonly: " . _fs_path($node);
        } else {
          TRACE "related_nodes: d=$distance uuid=$uuid : " . ($opts{readonly} ? "" : "push ") . "related not readonly: " . _fs_path($node);
        }
      } else {
        TRACE "related_nodes: d=$distance uuid=$uuid : related missing: <deleted>";
      }
      if($children && ($distance < $abort_distance)) {
        TRACE "related_nodes: d=$distance uuid=$uuid : postpone " . scalar(@$children) . " children";
      }
    }

    $node = shift @nn;
    if(exists($node->{MARK_DISTANCE})) {
      # marker reached, restore distance
      $distance = $node->{MARK_DISTANCE};
      TRACE "related_nodes: d=$distance uuid=$node->{MARK_UUID} : processing children" if($do_trace);
      $node = shift @nn;
    }
    $uuid = $node->{uuid};
  }

  if($opts{omit_self}) {
    my $snode_id = $snode->{id};
    my @filtered = grep { $_->{id} != $snode_id } @related_nodes;
    TRACE "related_nodes: found total=" . scalar(@filtered) . " related readonly subvolumes" if($do_trace);
    return @filtered;
  }
  TRACE "related_nodes: found total=" . scalar(@related_nodes) . " related readonly subvolumes (including self)" if($do_trace);
  return @related_nodes;
}


# Resolve the best common parent subvolume for an incremental transfer
# of $svol (with snapshot root $snaproot) to target root $droot.
# Returns the parent vinfo (source side), or undef if no common parent
# is accessible, or if option "strict_related" is set and no parent
# related by parent_uuid was found.
# Output options (scalar refs, filled on success):
#   clone_src          => \$aref - additional clone sources (vinfo)
#   clone_src_extra    => \$aref - extra (optional) clone sources
#   target_parent_node => \$node - node of the parent on the target filesystem
sub get_best_parent($$$;@)
{
  my $svol = shift // die;
  my $snaproot = shift // die;
  my $droot = shift || die;
  my %opts = @_;
  my $ret_clone_src          = $opts{clone_src};
  my $ret_clone_src_extra    = $opts{clone_src_extra};
  my $ret_target_parent_node = $opts{target_parent_node};
  my $strict_related         = $opts{strict_related};

  TRACE "get_best_parent: resolving best common parent for subvolume: $svol->{PRINT}  (droot=$droot->{PRINT})" if($do_trace);

  # honor incremental_resolve option
  my $source_incremental_resolve = config_key($svol, "incremental_resolve");
  my $target_incremental_resolve = config_key($droot, "incremental_resolve");
  my $resolve_sroot = ($source_incremental_resolve eq "mountpoint") ? $snaproot->{VINFO_MOUNTPOINT} : $snaproot;
  my $resolve_droot = ($source_incremental_resolve eq "mountpoint") ? $droot->{VINFO_MOUNTPOINT} : $droot;

  # NOTE: Using parents from different mount points does NOT work, see
  # <https://github.com/kdave/btrfs-progs/issues/96>.
  # btrfs-progs-4.20.2 fails if the parent subvolume is not on same
  # mountpoint as the source subvolume:
  #  - btrfs send -p: "ERROR: not on mount point: /path/to/mountpoint"
  #  - btrfs receive: "ERROR: parent subvol is not reachable from inside the root subvol"
  my $source_fallback_all_mountpoints = ($source_incremental_resolve eq "_all_accessible");
  my $target_fallback_all_mountpoints = ($target_incremental_resolve eq "_all_accessible");

  my @inaccessible_nodes;
  my %gbc_opts = ( push_inaccessible_nodes  => \@inaccessible_nodes,
                   fallback_all_mountpoints => $target_fallback_all_mountpoints,
                 );

  # resolve correlated subvolumes by parent_uuid relationship.
  # no warnings on aborted search (due to deep relations), note that
  # we could limit the search depth here for some performance
  # improvements, as this only affects extra clones.
  my %c_rel_id;   # map id to c_related
  my @c_related;  # candidates for parent (correlated + related), unsorted
  foreach (_related_nodes($svol->{node}, readonly => 1, omit_self => 1, nowarn => 1)) {
    my $vinfo = vinfo_resolved($_, $resolve_sroot);
    if((not $vinfo) && $source_fallback_all_mountpoints) { # related node is not under $resolve_sroot
      $vinfo = vinfo_resolved_all_mountpoints($_, $svol);
    }
    if($vinfo) {
      my $correlated = get_best_correlated($resolve_droot, $vinfo, %gbc_opts);
      push @c_related, $correlated if($correlated);
      $c_rel_id{$_->{id}} = $correlated;
    } else {
      DEBUG "Related subvolume is not accessible within $source_incremental_resolve \"$resolve_sroot->{PRINT}\": " . _fs_path($_);
    }
  }
  # sort by cgen distance to $svol (cgen for snapshots, gen for read-write)
  my $cgen_ref = $svol->{node}{readonly} ? $svol->{node}{cgen} : $svol->{node}{gen};
  my @c_related_older = sort { ($cgen_ref - $a->[0]{node}{cgen}) <=> ($cgen_ref - $b->[0]{node}{cgen}) }
                        grep { $_->[0]{node}{cgen} <= $cgen_ref } @c_related;
  my @c_related_newer = sort { ($a->[0]{node}{cgen} - $cgen_ref) <=> ($b->[0]{node}{cgen} - $cgen_ref) }
                        grep { $_->[0]{node}{cgen} >  $cgen_ref } @c_related;

  # NOTE: While _related_nodes() returns deep parent_uuid
  # relations, there is always a chance that these relations get
  # broken.
  #
  # Consider parent_uuid chain ($svol readonly)
  # B->A, C->B, delete B: C has no relation to A.
  # This is especially true for backups and archives (btrfs receive)
  #
  # For snapshots (here: S=$svol readwrite) the scenario is different:
  # A->S, B->S, C->S, delete B: A still has a relation to C.
  #
  # resolve correlated subvolumes in same directory matching btrbk file name scheme
  my (@c_snapdir_older, @c_snapdir_newer);
  if(exists($svol->{node}{BTRBK_BASENAME})) {
    my $snaproot_btrbk_direct_leaf = vinfo_subvol_list($snaproot, readonly => 1, btrbk_direct_leaf => $svol->{node}{BTRBK_BASENAME});
    my @sbdl_older = sort { cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) }
                     grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) < 0 } @$snaproot_btrbk_direct_leaf;
    my @sbdl_newer = sort { cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) }
                     grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) > 0 } @$snaproot_btrbk_direct_leaf;

    # reuse correlation results from above where possible
    @c_snapdir_older = map { $c_rel_id{$_->{node}{id}} // get_best_correlated($resolve_droot, $_, %gbc_opts) // () } @sbdl_older;
    @c_snapdir_newer = map { $c_rel_id{$_->{node}{id}} // get_best_correlated($resolve_droot, $_, %gbc_opts) // () } @sbdl_newer;
  }

  if($do_trace) {
    TRACE "get_best_parent: related reference cgen=$svol->{node}{cgen}" if($do_trace);
    TRACE map("get_best_parent: related older: $_->[0]{PRINT} (cgen=$_->[0]{node}{cgen})  $_->[1]{PRINT}", @c_related_older);
    TRACE map("get_best_parent: related newer: $_->[0]{PRINT} (cgen=$_->[0]{node}{cgen})  $_->[1]{PRINT}", @c_related_newer);
    TRACE map("get_best_parent: snapdir older: $_->[0]{PRINT} (cgen=$_->[0]{node}{cgen})  $_->[1]{PRINT}", @c_snapdir_older);
    TRACE map("get_best_parent: snapdir newer: $_->[0]{PRINT} (cgen=$_->[0]{node}{cgen})  $_->[1]{PRINT}", @c_snapdir_newer);
  }

  if(scalar @inaccessible_nodes) { # populated by get_best_correlated()
    WARN "Best common parent for \"$svol->{PRINT}\" is not accessible within target $target_incremental_resolve \"$resolve_droot->{PRINT}\", ignoring: " . join(", ", map('"' . _fs_path($_) . '"',@inaccessible_nodes));
  }

  # preferences for parent (and required clone sources):
  #  1. closest older in snapdir (by btrbk timestamp), related
  #  2. closest older related (by cgen)
  #  3. closest newer related (by cgen)
  #  4. closest older in snapdir (by btrbk timestamp)
  #  5. closest newer in snapdir (by btrbk timestamp)
  #
  my @parent;
  if(my $cc = shift @c_related_older) {
    push @parent, $cc;         #  2. closest older related (by cgen)
    DEBUG "Resolved best common parent (closest older parent_uuid relationship): $cc->[0]{PRINT}";
  }
  if(my $cc = shift @c_related_newer) {
    DEBUG ((scalar @parent ? "Adding clone source" : "Resolved best common parent") . " (closest newer parent_uuid relationship): $cc->[0]{PRINT}");
    push @parent, $cc;         #  3. closest newer related (by cgen)
  }
  if(my $cc = shift @c_snapdir_older) {
    unless(grep { $_->[0]{node}{id} == $cc->[0]{node}{id} } @parent) {
      if($c_rel_id{$cc->[0]{node}{id}}) {
        DEBUG "Resolved best common parent (closest older btrbk timestamp, with parent_uuid relationship): $cc->[0]{PRINT}";
        unshift @parent, $cc;  #  1. closest older in snapdir (by btrbk timestamp), related
      }
      else {
        DEBUG ((scalar @parent ? "Adding clone source" : "Resolved best common parent") . " (closest older btrbk timestamp): $cc->[0]{PRINT}");
        push @parent, $cc;     #  4. closest older in snapdir (by btrbk timestamp)
      }
    }
  }
  if(my $cc = shift @c_snapdir_newer) {
    unless(grep { $_->[0]{node}{id} == $cc->[0]{node}{id} } @parent) {
      DEBUG ((scalar @parent ? "Adding clone source" : "Resolved best common parent") . " (closest newer btrbk timestamp): $cc->[0]{PRINT}");
      push @parent, $cc;       #  5. closest newer in snapdir (by btrbk timestamp)
    }
  }

  # assemble results
  unless(scalar @parent) {
    DEBUG("No common parents of \"$svol->{PRINT}\" found in src=\"$resolve_sroot->{PRINT}/\", target=\"$resolve_droot->{PRINT}/\"");
    return undef;
  }
  if($strict_related && (not scalar(@c_related))) {
    # all parents come from c_snapdir (btrbk_direct_leaf), no relations by parent_uuid found
    WARN "No related common parent found (by parent_uuid relationship) for: $svol->{PRINT}";
    WARN "Hint: setting option \"incremental\" to \"yes\" (instead of \"strict\") will use parent: " . join(", ", map { $_->[0]{PRINT} } @parent);
    return undef;
  }

  # everything correlated but not used as parent becomes an extra clone source
  my @extra_clones;
  foreach my $cc (@c_related_older, @c_related_newer, grep { not exists($c_rel_id{$_->[0]{node}{id}}) } (@c_snapdir_older, @c_snapdir_newer)) {
    push @extra_clones, $cc->[0] unless(grep { $_->[0]{node}{id} == $cc->[0]{node}{id} } @parent);
  }
  DEBUG "Resolved " . (scalar @extra_clones) . " extra clone sources";
  if($do_trace) {
    TRACE "get_best_parent: parent,clones: $_->[0]{PRINT}" foreach(@parent);
    TRACE "get_best_parent: extra clone  : $_->{PRINT}" foreach(@extra_clones);
  }

  my $ret_parent = shift @parent;
  my @clone_src = map { $_->[0] } @parent;
  $$ret_clone_src = \@clone_src if($ret_clone_src);
  $$ret_clone_src_extra = \@extra_clones if($ret_clone_src_extra);
  $$ret_target_parent_node = $ret_parent->[1]{node} if($ret_target_parent_node);
  return $ret_parent->[0];
}


# Returns the related snapshot of $svol (below $sroot, optionally
# filtered by $btrbk_basename) with the highest cgen, i.e. the most
# recently created one; or undef if no related snapshot exists.
sub get_latest_related_snapshot($$;$)
{
  my $sroot = shift || die;
  my $svol = shift // die;
  my $btrbk_basename = shift;
  my $latest;
  my $latest_cgen = -1;
  foreach my $snap (get_related_snapshots($sroot, $svol, $btrbk_basename)) {
    my $cgen = $snap->{node}{cgen};
    next unless($cgen > $latest_cgen);
    $latest = $snap;
    $latest_cgen = $cgen;
  }
  if($latest) {
    DEBUG "Latest snapshot child for \"$svol->{PRINT}#$svol->{node}{gen}\" is: $latest->{PRINT}#$latest->{node}{cgen}";
  } else {
    DEBUG "No latest snapshots found for: $svol->{PRINT}";
  }
  return $latest;
}


# Validate (and untaint) $file against the accept rules in $accept
# (hashref with flags: absolute, relative, name_only, wildcards).
# Rejects directory traversal ("..") in all cases.
# Returns the untainted (and, with option "sanitize", normalized) file
# name, or undef on validation failure. Error messages are printed
# only if option "error_statement" is defined.
sub check_file($$;@)
{
  my $file = shift // die;
  my $accept = shift || die;
  my %opts = @_;
  my $sanitize = $opts{sanitize};
  my $error_statement = $opts{error_statement};  # if not defined, no error messages are printed
  # wildcard input is matched against a wider character class
  my $match = $accept->{wildcards} ? $glob_match : $file_match;

  if($file =~ /^($match)$/) {
    $file = $1;  # untaint
    if($accept->{absolute}) {
      unless($file =~ /^\//) {
        ERROR "Only absolute files allowed $error_statement" if(defined($error_statement));
        return undef;
      }
    }
    elsif($accept->{relative}) {
      if($file =~ /^\//) {
        ERROR "Only relative files allowed $error_statement" if(defined($error_statement));
        return undef;
      }
    }
    elsif($accept->{name_only}) {
      if($file =~ /\//) {
        ERROR "Invalid file name ${error_statement}: $file" if(defined($error_statement));
        return undef;
      }
    }
    elsif(not $accept->{wildcards}) {
      die("accept_type must contain either 'relative' or 'absolute'");
    }
  }
  else {
    ERROR "Ambiguous file ${error_statement}: $file" if(defined($error_statement));
    return undef;
  }
  # check directory traversal
  if(($file =~ /^\.\.$/) || ($file =~ /^\.\.\//) || ($file =~ /\/\.\.\//) || ($file =~ /\/\.\.$/)) {
    ERROR "Illegal directory traversal ${error_statement}: $file" if(defined($error_statement));
    return undef;
  }
  if($sanitize) {
    $file =~ s/\/(\.?\/)+/\//g;  # sanitize "//", "/./" -> "/"
    $file =~ s/\/\.$/\//;        # sanitize trailing "/." -> "/"
    $file =~ s/\/$// unless($file eq '/');   # remove trailing slash
  }
  return $file;
}


# Split $url into ( $url_prefix, $path ), converting legacy
# "host:/path" notation to an "ssh://host" prefix. $url_prefix is ""
# for local paths. $path is the sanitized absolute path, or undef if
# validation fails (see check_file()).
# Options are passed through to check_file(); "accept_wildcards"
# additionally allows wildcards in the path.
sub check_url($;@)
{
  my $url = shift // die;
  my %opts = @_;
  my $url_prefix = "";

  if($url =~ /^ssh:\/\//) {
    # strip "ssh://host[:port]" prefix, leaving the absolute path in $url
    if($url =~ s/^(ssh:\/\/($host_name_match|$ipv4_addr_match|\[$ipv6_addr_match\])(:[1-9][0-9]*)?)\//\//) {
      $url_prefix = $1;
    }
  }
  elsif($url =~ s/^($host_name_match|$ipv4_addr_match|\[$ipv6_addr_match\]):\//\//) {
    # convert "my.host.com:/my/path", "[2001:db8::7]:/my/path" to ssh url
    $url_prefix = "ssh://" . $1;
  }
  # if no url prefix match, treat it as file and let check_file() print errors

  return ( $url_prefix, check_file($url, { absolute => 1, wildcards => $opts{accept_wildcards} }, sanitize => 1, %opts) );
}


# Look up configuration option $key for $config (a config section or
# a vinfo), honoring command line overrides and walking up the
# section hierarchy (PARENT chain) until the key is found.
# Options:
#   prefix  => $s - prepend $s to the value (if value is defined)
#   postfix => $s - append $s to the value (if value is defined)
# Returns undef if the key is not set anywhere in the chain.
sub config_key($$;@)
{
  my $config = shift || die;
  my $key = shift || die;
  my %opts = @_;
  # fix: removed unused variable $orig_config
  $config = $config->{CONFIG} if($config->{CONFIG});  # accept vinfo for $config

  # command line overrides always win
  if(exists($config_override{$key})) {
    TRACE "config_key: OVERRIDE key=$key to value=" . ($config_override{$key} // "<undef>") if($do_trace);
    return $config_override{$key};
  }

  while(not exists($config->{$key})) {
    # note: while all config keys exist in "meta" context (at least with default values),
    #       we also allow fake configs (CONTEXT="cmdline") which have no PARENT.
    return undef unless($config->{PARENT});
    $config = $config->{PARENT};
  }
  my $retval = $config->{$key};
  $retval = $opts{prefix} . $retval if(defined($opts{prefix}) && defined($retval));
  $retval .= $opts{postfix} if(defined($opts{postfix}) && defined($retval));
  return $retval;
}


# Builds the effective preserve policy hash for option
# "<prefix>_preserve", merged with the global preserve_hour_of_day /
# preserve_day_of_week settings and the "<prefix>_preserve_min"
# minimum. With option "wipe", returns a policy preserving only the
# latest snapshot.
sub config_preserve_hash($$;@)
{
  my $config = shift || die;
  my $prefix = shift || die;
  my %opts = @_;

  # wipe mode: preserve nothing but the latest
  return { hod => 0, dow => 'sunday',  min => 'latest', min_q => 'latest' } if($opts{wipe});

  my $preserve = config_key($config, $prefix . "_preserve") // {};
  my %ret = ( %$preserve,  # make a copy (don't pollute config)
              hod => config_key($config, "preserve_hour_of_day"),
              dow => config_key($config, "preserve_day_of_week")
             );
  my $preserve_min = config_key($config, $prefix . "_preserve_min");
  if(defined($preserve_min)) {
    $ret{min} = $preserve_min;  # used for raw schedule output
    if(($preserve_min eq 'all') || ($preserve_min eq 'latest')) {
      $ret{min_q} = $preserve_min;
    }
    elsif($preserve_min =~ /^([0-9]+)([hdwmy])$/) {
      # e.g. "7d" -> min_n=7, min_q='d'
      ($ret{min_n}, $ret{min_q}) = ($1, $2);
    }
    else { die; }  # values are validated at config parse time
  }
  return \%ret;
}


# Returns a hashref describing the compression settings for option
# $config_key (e.g. "stream_compress"), or undef if compression is
# disabled. Keys: key, level, long, threads, adapt.
sub config_compress_hash($$)
{
  my $config = shift || die;
  my $config_key = shift || die;
  my $compress_key = config_key($config, $config_key);
  return undef unless($compress_key);
  my %compress = ( key => $compress_key );
  # pull in the per-algorithm sub-options ("<config_key>_<opt>")
  $compress{$_} = config_key($config, $config_key . "_" . $_) foreach (qw( level long threads adapt ));
  return \%compress;
}


# Assembles the stream options (compression, buffering, rate
# limiting) for a send/receive stream from $source to $target.
sub config_stream_hash($$)
{
  my $source = shift || die;
  my $target = shift || die;

  # for remote source, limits read rate of ssh stream output after decompress
  # for remote target, limits read rate of "btrfs send"
  # for both local, limits read rate of "btrfs send"
  # for raw targets, limits read rate of "btrfs send | xz" (raw_target_compress)
  my %local_sink = (
    stream_buffer => config_key($target, "stream_buffer"),
    rate_limit    => config_key($target, "rate_limit"),
    show_progress => $show_progress,
  );

  # limits read rate of "btrfs send", before compression
  my %rsh_source = (
    stream_buffer => config_key($source, "stream_buffer_remote"),
    rate_limit    => config_key($source, "rate_limit_remote"),
  );

  # limits read rate of ssh stream output
  my %rsh_sink = (
    stream_buffer => config_key($target, "stream_buffer_remote"),
    rate_limit    => config_key($target, "rate_limit_remote"),
  );

  return {
    stream_compress => config_compress_hash($target, "stream_compress"),
    local_sink      => \%local_sink,
    rsh_source      => \%rsh_source,
    rsh_sink        => \%rsh_sink,
  };
}


# Build an encryption descriptor from the configuration.
#
# $config: configuration (sub)hash
# $config_key: option name holding the encryption type
#
# Returns undef if encryption is disabled (option unset); otherwise a
# hashref with the encryption type and all gpg / openssl / kdf options
# (each possibly undef).
sub config_encrypt_hash($$)
{
  my $config = shift || die;
  my $config_key = shift || die;
  my $encrypt_type = config_key($config, $config_key);
  return undef unless($encrypt_type);
  return {
    type => $encrypt_type,
    keyring => config_key($config, "gpg_keyring"),
    recipient => config_key($config, "gpg_recipient"),
    iv_size => config_key($config, "openssl_iv_size"),
    ciphername => config_key($config, "openssl_ciphername"),
    keyfile => config_key($config, "openssl_keyfile"),
    # guard against undef: "eq" on an unset option would raise an
    # "uninitialized" warning, which is fatal under FATAL => all
    kdf_keygen_each => ((config_key($config, "kdf_keygen") // "") eq "each"),
    kdf_backend => config_key($config, "kdf_backend"),
    kdf_keysize => config_key($config, "kdf_keysize"),
  };
}


# Render all (non-deprecated) configuration options of $config as a list
# of aligned "key value" text lines.
#
# $config: configuration section hash (a vinfo hash is also accepted,
#          its CONFIG member is used)
# %opts:   resolve => 1  resolve each option via config_key() instead of
#                        dumping only locally defined options
#          prefix  => s  prepend string s to every output line
#
# Returns the formatted lines (one per option value).
sub config_dump_keys($;@)
{
  my $config = shift || die;
  my %opts = @_;
  $config = $config->{CONFIG} if($config->{CONFIG});  # accept vinfo for $config

  my @rows;
  my $keylen_max = 0;
  foreach my $key (sort keys %config_options)
  {
    next if($config_options{$key}->{deprecated});
    my $val;
    if($opts{resolve}) {
      $val = config_key($config, $key);
    }
    else {
      next unless exists($config->{$key});
      $val = $config->{$key};
    }
    # multi-value options produce one row per value
    my @values = (ref($val) eq "ARRAY") ? @$val : ( $val );
    foreach my $v (@values) {
      if(defined($v) && $config_options{$key}->{accept_preserve_matrix}) {
        $v = format_preserve_matrix($v, format => "config");
      }
      # undef means "no" if the key is defined at all, "<unset>" otherwise
      $v //= exists($config->{$key}) ? "no" : "<unset>";
      my $keylen = length($key);
      $keylen_max = $keylen if($keylen > $keylen_max);
      push @rows, { key => $key, val => $v, len => $keylen };
    }
  }
  # print as table
  return map { ($opts{prefix} // "") . $_->{key} . (' ' x (1 + $keylen_max - $_->{len})) . ' ' . $_->{val} } @rows;
}


# Validate a single "key value" option and store it in the $config
# section hash.
#
# $config:  section hash the option is added to
# $key:     option name (must exist in %config_options)
# $value:   raw option value from the configuration file ("" maps to
#           "yes" if accepted)
# $context: configuration context name (e.g. "global", "volume",
#           "subvolume", "target"); "OVERRIDE" bypasses the context check
# %opts:    error_statement => text appended to error/warning messages
#
# Validation is driven by the option's %config_options entry: accept
# list, accept_numeric, accept_file, accept_regexp,
# accept_preserve_matrix. Also handles deprecated option translation
# (replace_key/replace_value, ABORT, FAILSAFE_PRESERVE) and multi-value
# options (allow_multiple, split).
#
# Returns $config on success, undef on any validation error.
# NOTE: the value "no" is stored as undef; preserve matrices are stored
# as hashrefs; allow_multiple options are stored as arrayrefs.
sub append_config_option($$$$;@)
{
  my $config = shift;
  my $key = shift;
  my $value = shift;
  my $context = shift;
  my %opts = @_;
  my $error_statement = $opts{error_statement} // "";

  my $opt = $config_options{$key};

  # accept only keys listed in %config_options
  unless($opt) {
    ERROR "Unknown option \"$key\" $error_statement";
    return undef;
  }

  # restrict option to its declared contexts ("OVERRIDE" bypasses the check)
  if($opt->{context} && !grep(/^$context$/, @{$opt->{context}}) && ($context ne "OVERRIDE")) {
    ERROR "Option \"$key\" is only allowed in " . join(" or ", @{$opt->{context}}) . " context $error_statement";
    return undef;
  }

  if($opt->{deny_glob_context} && $config->{GLOB_CONTEXT}) {
    ERROR "Option \"$key\" is not allowed on section with wildcards $error_statement";
    return undef;
  }

  # value validation: empty, accept list, numeric, file, regexp, preserve matrix
  if($value eq "") {
    if(grep(/^yes$/, @{$opt->{accept}})) {
      $value = "yes";
      TRACE "option \"$key\" has no value, accepted map to \"yes\"" if($do_trace);
    }
    else {
      ERROR "Unsupported empty value for option \"$key\" $error_statement";
      return undef;
    }
  }
  elsif(grep(/^\Q$value\E$/, @{$opt->{accept}})) {
    TRACE "option \"$key=$value\" found in accept list" if($do_trace);
  }
  elsif($opt->{accept_numeric} && ($value =~ /^[0-9]+$/)) {
    TRACE "option \"$key=$value\" is numeric, accepted" if($do_trace);
  }
  elsif($opt->{accept_file})
  {
    # be very strict about file options, for security sake
    $value = check_file($value, $opt->{accept_file}, sanitize => 1, error_statement => ($error_statement ? "for option \"$key\" $error_statement" : undef));
    return undef unless(defined($value));

    TRACE "option \"$key=$value\" is a valid file, accepted" if($do_trace);
    $value = "no" if($value eq ".");  # maps to undef later
  }
  elsif($opt->{accept_regexp}) {
    my $match = $opt->{accept_regexp};
    if($value =~ m/$match/) {
      TRACE "option \"$key=$value\" matched regexp, accepted" if($do_trace);
    }
    else {
      ERROR "Value \"$value\" failed input validation for option \"$key\" $error_statement";
      return undef;
    }
  }
  elsif($opt->{accept_preserve_matrix}) {
    # parse e.g. "14d 4w *m" into { d => 14, w => 4, m => 'all' },
    # rejecting duplicate quantifiers and any unparsed leftover
    my %preserve;
    my $s = ' ' . $value;
    while($s =~ s/\s+(\*|[0-9]+)([hdwmyHDWMY])//) {
      my $n = $1;
      my $q = lc($2); # qw( h d w m y )
      $n = 'all' if($n eq '*');
      if(exists($preserve{$q})) {
        ERROR "Value \"$value\" failed input validation for option \"$key\": multiple definitions of '$q' $error_statement";
        return undef;
      }
      $preserve{$q} = $n;
    }
    unless($s eq "") {
      ERROR "Value \"$value\" failed input validation for option \"$key\" $error_statement";
      return undef;
    }
    TRACE "adding preserve matrix $context context:" . Data::Dumper->new([\%preserve], [ $key ])->Indent(0)->Pad(' ')->Quotekeys(0)->Pair('=>')->Dump() if($do_trace && $do_dumper);
    $config->{$key} = \%preserve;
    return $config;
  }
  else
  {
    ERROR "Unsupported value \"$value\" for option \"$key\" $error_statement";
    return undef;
  }

  # options requiring an external executable are ignored (not fatal) if missing
  if($opt->{require_bin} && (not check_exe($opt->{require_bin}))) {
    WARN "Found option \"$key\", but required executable \"$opt->{require_bin}\" does not exist on your system. Please install \"$opt->{require_bin}\".";
    WARN "Ignoring option \"$key\" $error_statement";
    $value = "no";
  }

  # deprecated option handling: warn, abort, enter failsafe mode, or
  # translate to the replacement key/value
  if($opt->{deprecated}) {
    if(my $warn_msg = ($opt->{deprecated}->{$value}->{warn} || $opt->{deprecated}->{DEFAULT}->{warn})) {
      WARN "Found deprecated option \"$key $value\" $error_statement: $warn_msg";
    }
    if($opt->{deprecated}->{$value}->{ABORT} || $opt->{deprecated}->{DEFAULT}->{ABORT}) {
      ERROR "Deprecated (incompatible) option \"$key\" found $error_statement, refusing to continue";
      return undef;
    }
    if($opt->{deprecated}->{$value}->{FAILSAFE_PRESERVE} || $opt->{deprecated}->{DEFAULT}->{FAILSAFE_PRESERVE}) {
      unless($config_override{FAILSAFE_PRESERVE}) { # warn only once
        WARN "Entering failsafe mode:";
        WARN "  - preserving ALL snapshots for ALL subvolumes";
        WARN "  - ignoring ALL targets (skipping backup creation)";
        WARN "  - please read \"doc/upgrade_to_v0.23.0.md\"";
        $config_override{FAILSAFE_PRESERVE} = "Failsafe mode active (deprecated configuration)";
      }
      $config_override{snapshot_preserve_min} = 'all';
      return $config;
    }
    my $replace_key   = $opt->{deprecated}->{$value}->{replace_key};
    my $replace_value = $opt->{deprecated}->{$value}->{replace_value};
    if(defined($replace_key)) {
      $key = $replace_key;
      $value = $replace_value;
      WARN "Using \"$key $value\"";
    }
  }

  # multi-value options accumulate into an arrayref (optionally split)
  if($opt->{allow_multiple}) {
    my $aref = $config->{$key} // [];
    if($opt->{split}) {
      push(@$aref, split($opt->{split}, $value));
    }
    else {
      push(@$aref, $value);
    }
    TRACE "pushing option \"$key=$value\" to $aref=[" . join(',', @$aref) . "]" if($do_trace);
    $value = $aref;
  }
  elsif(exists($config->{$key})) {
    unless($opt->{c_default}) { # note: computed defaults are already present
      WARN "Option \"$key\" redefined $error_statement";
    }
  }

  TRACE "adding option \"$key=$value\" to $context context" if($do_trace);
  $value = undef if($value eq "no");  # we don't want to check for "no" all the time
  $config->{$key} = $value;
  return $config;
}


# Process a single parsed configuration line: the section keywords
# "volume", "subvolume" and "target" open or switch sections; any other
# key is delegated to append_config_option().
#
# $file: configuration file name (for error messages, together with the
#        line counter $. of the caller's filehandle)
# $root: root (global context) section hash
# $cur:  current section context
# $key, $value: parsed (lowercased) option key and raw value
#
# Returns the new current section context, or undef on error.
sub parse_config_line($$$$$)
{
  my ($file, $root, $cur, $key, $value) = @_;

  if($key eq "volume")
  {
    # "volume" always starts a new section directly below global context
    $cur = $root;
    TRACE "config: context forced to: $cur->{CONTEXT}" if($do_trace);

    # be very strict about file options, for security sake
    my ($url_prefix, $path) = check_url($value, error_statement => "for option \"$key\" in \"$file\" line $.");
    return undef unless(defined($path));
    TRACE "config: adding volume \"$url_prefix$path\" to global context" if($do_trace);
    die unless($cur->{CONTEXT} eq "global");
    my $volume = { CONTEXT    => "volume",
                   PARENT     => $cur,
                   SUBSECTION => [],
                   url        => $url_prefix . $path,
                  };
    push(@{$cur->{SUBSECTION}}, $volume);
    $cur = $volume;
  }
  elsif($key eq "subvolume")
  {
    # climb back up to the enclosing volume context (e.g. from a
    # previous subvolume or target section)
    while($cur->{CONTEXT} ne "volume") {
      if(($cur->{CONTEXT} eq "global") || (not $cur->{PARENT})) {
        ERROR "Subvolume keyword outside volume context, in \"$file\" line $.";
        return undef;
      }
      $cur = $cur->{PARENT} || die;
      TRACE "config: context changed to: $cur->{CONTEXT}" if($do_trace);
    }
    # be very strict about file options, for security sake
    my $rel_path = check_file($value, { relative => 1, wildcards => 1 }, sanitize => 1, error_statement => "for option \"$key\" in \"$file\" line $.");
    return undef unless(defined($rel_path));

    # snapshot_name defaults to subvolume name (or volume name if subvolume=".")
    my $default_snapshot_name = ($rel_path eq '.') ? $cur->{url} : $rel_path;
    $default_snapshot_name =~ s/^.*\///;
    $default_snapshot_name = 'ROOT' if($default_snapshot_name eq ""); # if volume="/"

    TRACE "config: adding subvolume \"$rel_path\" to volume context: $cur->{url}" if($do_trace);
    my $subvolume = { CONTEXT       => "subvolume",
                      PARENT        => $cur,
                      # SUBSECTION    => [],  # handled by target propagation
                      rel_path      => $rel_path,
                      url           => $cur->{url} . '/' . $rel_path,
                      snapshot_name => $default_snapshot_name,  # computed default (c_default)
                     };
    $subvolume->{GLOB_CONTEXT} = 1 if($value =~ /\*/);
    push(@{$cur->{SUBSECTION}}, $subvolume);
    $cur = $subvolume;
  }
  elsif($key eq "target")
  {
    # target sections never nest: leave a previous target context first
    if($cur->{CONTEXT} eq "target") {
      $cur = $cur->{PARENT} || die;
      TRACE "config: context changed to: $cur->{CONTEXT}" if($do_trace);
    }
    if($value =~ /^((?<target_type>\S+)\s+)?(?<url>\S+)$/)
    {
      # as of btrbk-0.28.0, target_type is optional and defaults to "send-receive"
      my $target_type = $+{target_type} // "send-receive";
      my $url = $+{url};
      unless(grep(/^\Q$target_type\E$/, @config_target_types)) {
        ERROR "Unknown target type \"$target_type\" in \"$file\" line $.";
        return undef;
      }
      # be very strict about file options, for security sake
      my ($url_prefix, $path) = check_url($url, error_statement => "for option \"$key\" in \"$file\" line $.");
      return undef unless(defined($path));

      TRACE "config: adding target \"$url_prefix$path\" (type=$target_type) to $cur->{CONTEXT} context" . ($cur->{url} ? ": $cur->{url}" : "") if($do_trace);
      my $target = { CONTEXT => "target",
                     PARENT => $cur,
                     target_type => $target_type,
                     url => $url_prefix . $path,
                   };
      # NOTE: target sections are propagated to the apropriate SUBSECTION in _config_propagate_target()
      $cur->{TARGET} //= [];
      push(@{$cur->{TARGET}}, $target);
      $cur = $target;
    }
    else
    {
      ERROR "Ambiguous target configuration, in \"$file\" line $.";
      return undef;
    }
  }
  else
  {
    return append_config_option($cur, $key, $value, $cur->{CONTEXT}, error_statement => "in \"$file\" line $.");
  }

  return $cur;
}


# Recursively propagate "target" sections defined in global / volume
# context down to all subsections, unless a target with the same
# target_type and url is already defined there. In subvolume context the
# (propagated plus locally defined) TARGET list finally becomes the
# SUBSECTION list; the temporary TARGET key is removed from all sections.
#
# $cur: section hash to recurse into (usually the config root)
# Returns $cur.
sub _config_propagate_target
{
  my $cur = shift;
  foreach my $subsection (@{$cur->{SUBSECTION}}) {
    my @propagate_target;
    foreach my $target (@{$cur->{TARGET}}) {
      TRACE "propagating target \"$target->{url}\" from $cur->{CONTEXT} context to: $subsection->{CONTEXT} $subsection->{url}" if($do_trace);
      die if($target->{SUBSECTION});

      # don't propagate if a target of same target_type and url already exists in subsection
      if($subsection->{TARGET} &&
         grep({ ($_->{url} eq $target->{url}) && ($_->{target_type} eq $target->{target_type}) } @{$subsection->{TARGET}}))
      {
        DEBUG "Skip propagation of \"target $target->{target_type} $target->{url}\" from $cur->{CONTEXT} context to \"$subsection->{CONTEXT} $subsection->{url}\": same target already exists";
        next;
      }

      # shallow copy with fixed-up PARENT pointer
      my %copy = ( %$target, PARENT => $subsection );
      push @propagate_target, \%copy;
    }
    $subsection->{TARGET} //= [];
    unshift @{$subsection->{TARGET}}, @propagate_target;  # maintain config order: propagated targets go in front of already defined targets
    if($subsection->{CONTEXT} eq "subvolume") {
      # finally create missing SUBSECTION in subvolume context
      die if($subsection->{SUBSECTION});
      $subsection->{SUBSECTION} = $subsection->{TARGET};
    }
    else {
      # recurse into SUBSECTION
      _config_propagate_target($subsection);
    }
  }
  delete $cur->{TARGET};
  return $cur;
}


# Recursively collect all values of the multi-valued option $key from
# $config and all of its SUBSECTION descendants (depth-first, in
# configuration order). Only ARRAY ref values are collected; scalar or
# missing values are ignored.
sub _config_collect_values
{
  my ($config, $key) = @_;
  my $own = $config->{$key};
  my @collected = (ref($own) eq "ARRAY") ? @$own : ();
  push(@collected, _config_collect_values($_, $key)) foreach (@{$config->{SUBSECTION}});
  return @collected;
}


# Create a fresh configuration tree root ("global" context) whose "meta"
# PARENT section holds the default value of every non-deprecated option
# from %config_options. Extra key => value pairs passed in @_ are stored
# in the meta section (e.g. SRC_FILE => $file).
sub init_config(@)
{
  my %meta = ( CONTEXT => "meta", @_ );
  # populate option defaults (deprecated options don't pollute the hash)
  foreach my $opt (keys %config_options) {
    next if $config_options{$opt}->{deprecated};
    $meta{$opt} = $config_options{$opt}->{default};
  }
  return { CONTEXT => "global", SUBSECTION => [], PARENT => \%meta };
}


# Return the first readable file from the candidate list, or undef if
# none exists / is readable.
sub _config_file(@) {
  foreach my $candidate (@_) {
    TRACE "config: checking for file: $candidate" if($do_trace);
    return $candidate if(-r $candidate);
  }
  return undef;
}

# Parse a btrbk configuration file into a configuration tree.
#
# $file: path of the configuration file
#
# Returns the root ("global" context) section hash with all targets
# propagated, or undef on parse errors (errors are logged via ERROR).
sub parse_config($)
{
  my $file = shift;
  return undef unless($file);

  my $root = init_config(SRC_FILE => $file);
  my $cur = $root;

  TRACE "config: open configuration file: $file" if($do_trace);
  # lexical filehandle instead of the global bareword FILE
  open(my $fh, '<', $file) or die $!;
  while (<$fh>) {
    chomp;
    s/#.*//;         # remove comments
    s/\s*$//;        # remove trailing whitespace
    next if /^\s*$/; # ignore empty lines
    TRACE "config: parsing line $. with context=$cur->{CONTEXT}: \"$_\"" if($do_trace);
    if(/^(\s*)([a-zA-Z_]+)(\s+(.*))?$/)
    {
      # NOTE: we do not perform checks on indentation!
      my ($indent, $key, $value) = (length($1), lc($2), $4 // "");
      $cur = parse_config_line($file, $root, $cur, $key, $value);
      unless(defined($cur)) {
        # error, bail out
        $root = undef;
        last;
      }
      TRACE "line processed: new context=$cur->{CONTEXT}" if($do_trace);
    }
    else
    {
      ERROR "Parse error in \"$file\" line $.";
      $root = undef;
      last;
    }
  }
  # low-precedence "or" (with "||", the expression would bind to the handle)
  close($fh) or ERROR "Failed to close configuration file: $!";

  # skip target propagation on parse failure: calling it with undef
  # would silently autovivify a throwaway hash
  _config_propagate_target($root) if($root);

  return $root;
}


# Send subvolume %info{source} to %info{target}, either via btrfs
# send/receive or to a raw file (depending on the target's target_type).
# If "incremental" is enabled and a $info{parent} is given, an
# incremental transfer (with optional extra clone sources) is performed.
#
# sets $target->{CONFIG}->{ABORTED} on failure
# sets $target->{SUBVOL_RECEIVED}
#
# On success a fake vinfo child is injected for the received subvolume.
# Returns 1 on success, undef on failure.
sub macro_send_receive(@)
{
  my %info = @_;
  my $source = $info{source} || die;
  my $target = $info{target} || die;
  my $parent = $info{parent};
  my @clone_src = @{ $info{clone_src} // [] }; # copy array
  my $clone_src_extra = $info{clone_src_extra} // [];
  my $config_target = $target->{CONFIG};
  my $target_type = $config_target->{target_type} || die;
  my $incremental = config_key($config_target, "incremental");

  # check for existing target subvolume
  if(my $err_vol = vinfo_subvol($target, $source->{NAME})) {
    my $err_msg = "Please delete stray subvolume: \"btrfs subvolume delete $err_vol->{PRINT}\"";
    ABORTED($config_target, "Target subvolume \"$err_vol->{PRINT}\" already exists");
    FIX_MANUALLY($config_target, $err_msg);
    ERROR ABORTED_TEXT($config_target) . ", aborting send/receive of: $source->{PRINT}";
    ERROR $err_msg;
    return undef;
  }

  if($incremental)
  {
    # create backup from latest common
    if($parent) {
      INFO "Creating incremental backup...";
    }
    elsif($incremental ne "strict") {
      INFO "No common parent subvolume present, creating non-incremental backup...";
    }
    else {
      # "incremental strict" refuses non-incremental transfers
      WARN "Backup to $target->{PRINT} failed: no common parent subvolume found for \"$source->{PRINT}\", and option \"incremental\" is set to \"strict\"";
      ABORTED($config_target, "No common parent subvolume found, and option \"incremental\" is set to \"strict\"");
      return undef;
    }
    # add extra clone_src if "incremental_clones" is set
    my $ic = config_key($target, "incremental_clones");
    push @clone_src, map { --$ic < 0 ? () : $_ } @$clone_src_extra if($ic);
  }
  else {
    INFO "Creating non-incremental backup...";
    $parent = undef;
    @clone_src = ();
    delete $info{parent};
  }

  my $ret;
  my $vol_received;
  my $raw_info;
  if($target_type eq "send-receive")
  {
    $ret = btrfs_send_receive($source, $target, $parent, \@clone_src, \$vol_received);
    ABORTED($config_target, "Failed to send/receive subvolume") unless($ret);
  }
  elsif($target_type eq "raw")
  {
    unless($dryrun) {
      # make sure we know the source uuid
      if($source->{node}{uuid} =~ /^$fake_uuid_prefix/) {
        DEBUG "Fetching uuid of new subvolume: $source->{PRINT}";
        my $detail = btrfs_subvolume_show($source);
        return undef unless($detail);
        die unless($detail->{uuid});
        $source->{node}{uuid} = $detail->{uuid};
        $uuid_cache{$detail->{uuid}} = $source->{node};
      }
    }
    $ret = btrfs_send_to_file($source, $target, $parent, \$vol_received, \$raw_info);
    ABORTED($config_target, "Failed to send subvolume to raw file") unless($ret);
  }
  else
  {
    die "Illegal target type \"$target_type\"";
  }

  # inject fake vinfo

  # NOTE: it's not possible to add (and compare) correct target $detail
  # from btrfs_send_receive(), as source detail also has fake uuid.
  if($ret) {
    vinfo_inject_child($target, $vol_received, {
      # NOTE: this is not necessarily the correct parent_uuid (on
      # receive, btrfs-progs picks the uuid of the first (lowest id)
      # matching possible parent), whereas the target_parent is the
      # first from _correlated_nodes().
      #
      # NOTE: the parent_uuid of an injected receive target is not used
      # anywhere in btrbk at the time of writing
      parent_uuid    => $parent ? $info{target_parent_node}->{uuid} : '-',
      received_uuid  => $source->{node}{received_uuid} eq '-' ? $source->{node}{uuid} : $source->{node}{received_uuid},
      readonly       => 1,
      TARGET_TYPE    => $target_type,
      FORCE_PRESERVE => 'preserve forced: created just now',
    }, $raw_info);
  }

  # add info to $config->{SUBVOL_RECEIVED}
  $info{received_type} = $target_type || die;
  $info{received_subvolume} = $vol_received || die;
  $target->{SUBVOL_RECEIVED} //= [];
  push(@{$target->{SUBVOL_RECEIVED}}, \%info);
  unless($ret) {
    $info{ERROR} = 1;
    return undef;
  }
  return 1;
}


# Delete all scheduled-for-deletion subvolumes below $root_subvol which
# match the btrbk naming scheme "$subvol_dir/$subvol_basename.*", as
# decided by schedule() with %$schedule_options.
#
# sets $result_vinfo->{CONFIG}->{ABORTED} on failure
# sets $result_vinfo->{SUBVOL_DELETED}
#
# %delete_options: passed to btrfs_subvolume_delete(); the "qgroup"
# option additionally triggers (non-fatal) qgroup destruction.
# Returns 1 if all scheduled subvolumes were deleted, undef otherwise.
sub macro_delete($$$$$;@)
{
  my $root_subvol = shift || die;
  my $subvol_dir = shift // die;
  my $subvol_basename = shift // die;
  my $result_vinfo = shift || die;
  my $schedule_options = shift || die;
  my %delete_options = @_;
  $subvol_dir =~ s/\/+$//;

  # collect all subvolumes matching the btrbk naming scheme
  my @schedule;
  foreach my $vol (@{vinfo_subvol_list($root_subvol)}) {
    unless($vol->{node}{BTRBK_DATE} &&
           ($vol->{SUBVOL_DIR} eq $subvol_dir) &&
           ($vol->{node}{BTRBK_BASENAME} eq $subvol_basename)) {
      TRACE "Target subvolume does not match btrbk filename scheme, skipping: $vol->{PRINT}" if($do_trace);
      next;
    }
    push(@schedule, { value      => $vol,
                      # name       => $vol->{PRINT},  # only for logging
                      btrbk_date => $vol->{node}{BTRBK_DATE},
                      preserve   => $vol->{node}{FORCE_PRESERVE},
                     });
  }
  # only the delete list is of interest here
  my (undef, $delete) = schedule(
    %$schedule_options,
    schedule => \@schedule,
    preserve_date_in_future => 1,
   );

  if($delete_options{qgroup}->{destroy}) {
    # NOTE: we do not abort on qgroup destroy errors
    btrfs_qgroup_destroy($_, %{$delete_options{qgroup}}) foreach(@$delete);
  }

  my @delete_success = btrfs_subvolume_delete($delete, %delete_options);
  $subvol_dir .= '/' if($subvol_dir ne "");
  INFO "Deleted " . scalar(@delete_success) . " subvolumes in: $root_subvol->{PRINT}/$subvol_dir$subvol_basename.*";
  $result_vinfo->{SUBVOL_DELETED} //= [];
  push @{$result_vinfo->{SUBVOL_DELETED}}, @delete_success;

  if(scalar(@delete_success) == scalar(@$delete)) {
    return 1;
  }
  else {
    ABORTED($result_vinfo, "Failed to delete subvolume");
    return undef;
  }
}


# Archive snapshots matching $snapshot_name from $sroot to $droot
# ("btrbk archive"): collect archive candidates (snapshots without a
# receive target in $droot), schedule them against the target's
# "archive" preserve matrix, and send/receive the selected ones.
#
# $schedule_options: extra arguments merged into the schedule() call.
# Returns the number of successfully archived subvolumes, or undef if
# receive targets were found at unexpected locations ($droot ABORTED).
sub macro_archive_target($$$;$)
{
  my $sroot = shift || die;
  my $droot = shift || die;
  my $snapshot_name = shift // die;
  my $schedule_options = shift // {};
  my @schedule;

  # NOTE: this is pretty much the same as "resume missing"
  my $has_unexpected_location = 0;
  foreach my $svol (@{vinfo_subvol_list($sroot, readonly => 1, btrbk_direct_leaf => $snapshot_name, sort => 'path')})
  {
    # skip snapshots already present at the target, or excluded on command line
    next if(get_receive_targets($droot, $svol, exact => 1, warn => 1, ret_unexpected => \$has_unexpected_location));
    if(my $ff = vinfo_match(\@exclude_vf, $svol)) {
      INFO "Skipping archive candidate \"$svol->{PRINT}\": Match on exclude pattern \"$ff->{unparsed}\"";
      next;
    }
    DEBUG "Adding archive candidate: $svol->{PRINT}";

    push @schedule, { value      => $svol,
                      btrbk_date => $svol->{node}{BTRBK_DATE},
                      preserve   => $svol->{node}{FORCE_PRESERVE},
                    };
  }

  if($has_unexpected_location) {
    ABORTED($droot, "Receive targets of archive candidates exist at unexpected location");
    WARN "Skipping archiving of \"$sroot->{PRINT}/${snapshot_name}.*\": " . ABORTED_TEXT($droot);
    return undef;
  }

  # add all present archives as informative_only: these are needed for correct results of schedule()
  my $last_dvol_date;
  foreach my $dvol (@{vinfo_subvol_list($droot, readonly => 1, btrbk_direct_leaf => $snapshot_name)})
  {
    my $btrbk_date = $dvol->{node}{BTRBK_DATE};
    push @schedule, { informative_only => 1,
                      value            => $dvol,
                      btrbk_date       => $btrbk_date,
                    };

    # find last present archive (by btrbk_date, needed for archive_exclude_older below)
    $last_dvol_date = $btrbk_date if((not defined($last_dvol_date)) || (cmp_date($btrbk_date, $last_dvol_date) > 0));
  }

  # the preserve list holds the subvolumes to be archived
  my ($preserve, undef) = schedule(
    schedule => \@schedule,
    preserve => config_preserve_hash($droot, "archive"),
    preserve_threshold_date => (config_key($droot, "archive_exclude_older") ? $last_dvol_date : undef),
    result_preserve_action_text => 'archive',
    result_delete_action_text   => '',
    %$schedule_options
  );
  my @archive = grep defined, @$preserve;   # remove entries with no value from list (archive subvolumes)
  my $archive_total = scalar @archive;
  my $archive_success = 0;
  foreach my $svol (@archive)
  {
    # pick the best incremental parent / clone sources for each transfer
    my ($clone_src, $clone_src_extra, $target_parent_node);
    my $parent = get_best_parent($svol, $sroot, $droot,
                                 strict_related     => 0,
                                 clone_src          => \$clone_src,
                                 clone_src_extra    => \$clone_src_extra,
                                 target_parent_node => \$target_parent_node);
    if(macro_send_receive(source => $svol,
                          target => $droot,
                          parent => $parent,  # this is <undef> if no suitable parent found
                          clone_src          => $clone_src,
                          clone_src_extra    => $clone_src_extra,
                          target_parent_node => $target_parent_node,
                         ))
    {
      $archive_success++;
    }
    else {
      ERROR("Error while archiving subvolumes, aborting");
      last;
    }
  }

  if($archive_total) {
    INFO "Archived $archive_success/$archive_total subvolumes";
  } else {
    INFO "No missing archives found";
  }

  return $archive_success;
}


# Compare two btrbk_date arrayrefs of the form [ unix_time, NN ]:
# first by unix timestamp, then by sequence number (NN).
# Returns -1, 0 or 1 (suitable as sort comparator).
sub cmp_date($$)
{
  my ($date_a, $date_b) = @_;
  my $cmp = $date_a->[0] <=> $date_b->[0];
  return $cmp ? $cmp : ($date_a->[1] <=> $date_b->[1]);
}


# Core scheduler: given a list of dated items and a preserve matrix,
# decide which items to preserve and which to delete.
#
# %args:
#   schedule  => arrayref of { value, btrbk_date, preserve, informative_only }
#                items with "preserve" already set are always preserved;
#                informative_only items take part in the calendar slot
#                calculation but are never returned in either list
#   preserve  => preserve matrix hash (see config_preserve_hash())
#   preserve_date_in_future => if set, force-preserve items dated in the future
#   preserve_threshold_date => if set, do not preserve items at or before
#                this date (archive_exclude_older)
#   results   => optional arrayref, filled with one result hash per item
#   result_hints, result_preserve_action_text, result_delete_action_text
#             => cosmetic values merged into the results list entries
#
# Returns (\@preserve, \@delete) holding the "value" of each scheduled
# item (informative_only items excluded).
sub schedule(@)
{
  my %args = @_;
  my $schedule        = $args{schedule} || die;
  my $preserve        = $args{preserve} || die;
  my $preserve_date_in_future = $args{preserve_date_in_future};
  my $preserve_threshold_date = $args{preserve_threshold_date};
  my $results_list    = $args{results};
  my $result_hints    = $args{result_hints} // {};
  my $result_preserve_action_text = $args{result_preserve_action_text};
  my $result_delete_action_text   = $args{result_delete_action_text} // 'delete';

  my $preserve_day_of_week = $preserve->{dow} || die;
  my $preserve_hour_of_day = $preserve->{hod} // die;
  my $preserve_min_n       = $preserve->{min_n};
  my $preserve_min_q       = $preserve->{min_q};
  my $preserve_hourly      = $preserve->{h};
  my $preserve_daily       = $preserve->{d};
  my $preserve_weekly      = $preserve->{w};
  my $preserve_monthly     = $preserve->{m};
  my $preserve_yearly      = $preserve->{y};

  DEBUG "Schedule: " . format_preserve_matrix($preserve, format => "debug_text");

  #  0    1    2     3     4    5     6     7     8
  #  sec, min, hour, mday, mon, year, wday, yday, isdst

  # sort the schedule, ascending by date
  # regular entries come in front of informative_only
  my @sorted_schedule = sort { cmp_date($a->{btrbk_date}, $b->{btrbk_date} ) ||
                               (($a->{informative_only} ? ($b->{informative_only} ? 0 : 1) : ($b->{informative_only} ? -1 : 0)))
                             } @$schedule;

  DEBUG "Scheduler reference time: " . timestamp(\@tm_now, 'debug-iso');

  # first, do our calendar calculations
  # - days start on $preserve_hour_of_day (or 00:00 if timestamp_format=short)
  # - weeks start on $preserve_day_of_week
  # - months start on first $preserve_day_of_week of month
  # - years start on first $preserve_day_of_week of year
  # NOTE: leap hours are NOT taken into account for $delta_hours
  my $now_h = timegm_nocheck( 0, 0, $tm_now[2], $tm_now[3], $tm_now[4], $tm_now[5] ); # use timelocal() here (and below) if you want to honor leap hours

  # annotate every item with its distance (in hours/days/weeks/months/years)
  # from the reference time, using the corrected calendar boundaries above
  foreach my $href (@sorted_schedule)
  {
    my @tm = localtime($href->{btrbk_date}->[0]);
    my $has_exact_time = $href->{btrbk_date}->[2];
    my $delta_hours_from_hod = $tm[2] - ($has_exact_time ? $preserve_hour_of_day : 0);
    my $delta_days_from_eow = $tm[6] - $day_of_week_map{$preserve_day_of_week};
    if($delta_hours_from_hod < 0) {
      $delta_hours_from_hod += 24;
      $delta_days_from_eow -= 1;
    }
    if($delta_days_from_eow < 0) {
      $delta_days_from_eow += 7;
    }
    my $month_corr = $tm[4];  # [0..11]
    my $year_corr = $tm[5];
    if($tm[3] <= $delta_days_from_eow) {
      # our month/year start on first $preserve_day_of_week, corrected value
      $month_corr -= 1;
      if($month_corr < 0) {
        $month_corr = 11;
        $year_corr -= 1;
      }
    }

    # check timegm: ignores leap hours
    my $delta_hours  = int(($now_h - timegm_nocheck( 0, 0, $tm[2], $tm[3], $tm[4], $tm[5] )  ) / (60 * 60));
    my $delta_days   = int(($delta_hours + $delta_hours_from_hod) / 24);  # days from beginning of day
    my $delta_weeks  = int(($delta_days + $delta_days_from_eow) / 7);  # weeks from beginning of week
    my $delta_years  = ($tm_now[5] - $year_corr);
    my $delta_months = $delta_years * 12 + ($tm_now[4] - $month_corr);

    $href->{delta_hours}   = $delta_hours;
    $href->{delta_days}    = $delta_days;
    $href->{delta_weeks}   = $delta_weeks;
    $href->{delta_months}  = $delta_months;
    $href->{delta_years}   = $delta_years;

    # these are only needed for text output (format_preserve_delta)
    $href->{year} = $year_corr + 1900;
    $href->{month} = $month_corr + 1;
    $href->{delta_hours_from_hod} = $delta_hours_from_hod;
    $href->{delta_days_from_eow} = $delta_days_from_eow;
    $href->{real_hod} = $preserve_hour_of_day if($has_exact_time);

    if($preserve_date_in_future && ($delta_hours < 0)) {
      $href->{preserve} = "preserve forced: " . -($delta_hours) . " hours in the future";
    }
  }

  # "first_in_*" maps hold the earliest item of each hour/day/week/month/
  # year slot; they are built up cascade-style in the filters below
  my %first_in_delta_hours;
  my %first_in_delta_days;
  my %first_in_delta_weeks;
  my %first_weekly_in_delta_months;
  my %first_monthly_in_delta_years;

  # filter "preserve all within N days/weeks/..."
  foreach my $href (@sorted_schedule) {
    if($preserve_min_q) {
      if($preserve_min_q eq 'all') {
        $href->{preserve} = "preserve min: all";
      } elsif($preserve_min_q eq 'h') {
        $href->{preserve} = "preserve min: $href->{delta_hours} hours ago"   if($href->{delta_hours}  <= $preserve_min_n);
      } elsif($preserve_min_q eq 'd') {
        $href->{preserve} = "preserve min: $href->{delta_days} days ago"     if($href->{delta_days}   <= $preserve_min_n);
      } elsif($preserve_min_q eq 'w') {
        $href->{preserve} = "preserve min: $href->{delta_weeks} weeks ago"   if($href->{delta_weeks}  <= $preserve_min_n);
      } elsif($preserve_min_q eq 'm') {
        $href->{preserve} = "preserve min: $href->{delta_months} months ago" if($href->{delta_months} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'y') {
        $href->{preserve} = "preserve min: $href->{delta_years} years ago"   if($href->{delta_years}  <= $preserve_min_n);
      }
    }
    $first_in_delta_hours{$href->{delta_hours}} //= $href;
  }
  if($preserve_min_q && ($preserve_min_q eq 'latest') && (scalar @sorted_schedule)) {
    my $href = $sorted_schedule[-1];
    $href->{preserve} = 'preserve min: latest';
  }

  # filter hourly, daily, weekly, monthly, yearly
  foreach (sort {$b <=> $a} keys %first_in_delta_hours) {
    my $href = $first_in_delta_hours{$_} || die;
    if($preserve_hourly && (($preserve_hourly eq 'all') || ($href->{delta_hours} <= $preserve_hourly))) {
      $href->{preserve} = "preserve hourly: first of hour, $href->{delta_hours} hours ago";
    }
    $first_in_delta_days{$href->{delta_days}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_in_delta_days) {
    my $href = $first_in_delta_days{$_} || die;
    if($preserve_daily && (($preserve_daily eq 'all') || ($href->{delta_days} <= $preserve_daily))) {
      $href->{preserve} = "preserve daily: first of day" . ($href->{real_hod} ? sprintf(" (starting at %02u:00)", $href->{real_hod}) : "") . ", $href->{delta_days} days ago"
                          . (defined($href->{real_hod}) ? ($href->{delta_hours_from_hod} ? ", $href->{delta_hours_from_hod}h after " : ", at ") . sprintf("%02u:00", $href->{real_hod}) : "");
    }
    $first_in_delta_weeks{$href->{delta_weeks}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_in_delta_weeks) {
    my $href = $first_in_delta_weeks{$_} || die;
    if($preserve_weekly && (($preserve_weekly eq 'all') || ($href->{delta_weeks} <= $preserve_weekly))) {
      $href->{preserve} = "preserve weekly: $href->{delta_weeks} weeks ago," . _format_preserve_delta($href, $preserve_day_of_week);
    }
    $first_weekly_in_delta_months{$href->{delta_months}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_weekly_in_delta_months) {
    my $href = $first_weekly_in_delta_months{$_} || die;
    if($preserve_monthly && (($preserve_monthly eq 'all') || ($href->{delta_months} <= $preserve_monthly))) {
      $href->{preserve} = "preserve monthly: first weekly of month $href->{year}-" . sprintf("%02u", $href->{month}) . " ($href->{delta_months} months ago," . _format_preserve_delta($href, $preserve_day_of_week) . ")";
    }
    $first_monthly_in_delta_years{$href->{delta_years}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_monthly_in_delta_years) {
    my $href = $first_monthly_in_delta_years{$_} || die;
    if($preserve_yearly && (($preserve_yearly eq 'all') || ($href->{delta_years} <= $preserve_yearly))) {
      $href->{preserve} = "preserve yearly: first weekly of year $href->{year} ($href->{delta_years} years ago," . _format_preserve_delta($href, $preserve_day_of_week) . ")";
    }
  }

  # assemble results
  my @delete;
  my @preserve;
  my %result_base = ( %$preserve,
                      scheme => format_preserve_matrix($preserve),
                      %$result_hints,
                     );
  my $count_defined = 0;
  foreach my $href (@sorted_schedule)
  {
    my $result_reason_text = $href->{preserve};
    my $result_action_text;

    unless($href->{informative_only}) {
      if($href->{preserve}) {
        if($preserve_threshold_date && (cmp_date($href->{btrbk_date}, $preserve_threshold_date) <= 0)) {
          # older than threshold, do not add to preserve list
          $result_reason_text = "$result_reason_text, ignored (archive_exclude_older) older than existing archive";
        }
        else {
          push(@preserve, $href->{value});
          $result_action_text = $result_preserve_action_text;
        }
      }
      else {
        push(@delete, $href->{value});
        $result_action_text = $result_delete_action_text;
      }
      $count_defined++;
    }

    TRACE join(" ", "schedule: $href->{value}{PRINT}", ($href->{informative_only} ? "(informative_only)" : uc($result_action_text || "-")), ($result_reason_text // "-")) if($do_trace && $href->{value} && $href->{value}{PRINT});
    push @$results_list, { %result_base,
                           action => $result_action_text,
                           reason => $result_reason_text,
                           value => $href->{value},
                         } if($results_list);
  }
  DEBUG "Preserving " . @preserve . "/" . $count_defined . " items";
  return (\@preserve, \@delete);
}


# Format the time delta of a scheduled entry relative to its preserve
# anchor (day-of-week at hour-of-day), e.g. " 2d 5h after sunday 03:00",
# or " at sunday 03:00" when the entry matches the anchor exactly.
#
# NOTE: prototype fixed from ($$$) to ($$): the sub consumes exactly two
# arguments, and all callers pass two.
sub _format_preserve_delta($$)
{
  my $href = shift;                  # schedule entry (delta_* / real_hod keys)
  my $preserve_day_of_week = shift;  # configured preserve_day_of_week name
  my $s = "";
  $s .= " $href->{delta_days_from_eow}d" if($href->{delta_days_from_eow});
  $s .= " $href->{delta_hours_from_hod}h" if($href->{delta_hours_from_hod});
  # real_hod may legitimately be 0 (midnight), hence defined() and not truth
  return ($s ? "$s after " : " at ") . $preserve_day_of_week . (defined($href->{real_hod}) ? sprintf(" %02u:00", $href->{real_hod}) : "");
}


# Render a preserve matrix as text.
#
# $matrix: hashref with keys min_n/min_q (minimum retention), h/d/w/m/y
#          (per-unit retention counts or 'all'), dow, hod.
# %opt:    format => "short" (default, compact e.g. "14d 4w *m (sunday, 00:00)"),
#          "config" (compact without dow/hod suffix), or
#          "debug_text" (verbose human-readable sentence).
sub format_preserve_matrix($@)
{
  my $matrix = shift || die;
  my %opt = @_;
  my $fmt = $opt{format} // "short";
  # helper: true if a retention value is the special 'all' marker
  my $is_all = sub { my $v = shift; return ($v && ($v eq 'all')); };

  if($fmt eq "debug_text") {
    my %unit_name = ( h => 'hours', d => 'days', w => 'weeks', m => 'months', y => 'years' );
    my @parts;
    if($is_all->($matrix->{min_q})) {
      push @parts, "all forever";
    }
    else {
      push @parts, "latest" if($matrix->{min_q} && ($matrix->{min_q} eq 'latest'));
      push @parts, "all within $matrix->{min_n} $unit_name{$matrix->{min_q}}" if($matrix->{min_n} && $matrix->{min_q});
      push @parts, "first of day (starting at " . sprintf("%02u:00", $matrix->{hod}) . ") for $matrix->{d} days" if($matrix->{d});
      # each coarser granularity is only mentioned if the finer one is bounded
      unless($is_all->($matrix->{d})) {
        push @parts, "first daily in week (starting on $matrix->{dow}) for $matrix->{w} weeks" if($matrix->{w});
        unless($is_all->($matrix->{w})) {
          push @parts, "first weekly of month for $matrix->{m} months" if($matrix->{m});
          push @parts, "first weekly of year for $matrix->{y} years" if($matrix->{y} && !$is_all->($matrix->{m}));
        }
      }
    }
    return 'preserving ' . join('; ', @parts);
  }

  # compact form ("short" / "config")
  return '*d+' if($is_all->($matrix->{min_q}));
  my $out = "";
  # $out .= '.+' if($matrix->{min_q} && ($matrix->{min_q} eq 'latest'));
  $out .= $matrix->{min_n} . $matrix->{min_q} . '+' if($matrix->{min_n} && $matrix->{min_q});
  foreach my $unit (qw(h d w m y)) {
    my $n = $matrix->{$unit} // 0;
    next unless($n);
    $n = '*' if($n eq 'all');
    $out .= ($out ? ' ' : '') . $n . $unit;
  }
  # "short" format additionally shows the anchor (day-of-week, hour-of-day)
  if(($fmt ne "config") && ($matrix->{d} || $matrix->{w} || $matrix->{m} || $matrix->{y})) {
    $out .= " ($matrix->{dow}, " . sprintf("%02u:00", $matrix->{hod}) . ")";
  }
  return $out;
}


# Format a point in time as a timestamp string.
#
# $time:      unixtime (seconds since epoch), or an arrayref in
#             localtime() / gmtime() list format.
# $format:    "short"     -> YYYYMMDD
#             "long"      -> YYYYMMDDThhmm
#             "long-iso"  -> YYYYMMDDThhmmss+hhmm  (with UTC offset)
#             "debug-iso" -> YYYY-MM-DDThh:mm:ss+hhmm  (with UTC offset)
# $tm_is_utc: if true, @tm is interpreted as UTC and the offset is
#             printed as "+0000".
#
# Dies on unknown $format.
sub timestamp($$;$)
{
  my $time = shift // die;  # unixtime, or arrayref from localtime()
  my $format = shift;
  my $tm_is_utc = shift;
  my @tm = ref($time) ? @$time : localtime($time);
  my $ts;
  # NOTE: can't use POSIX::strftime(), as "%z" always prints offset of local timezone!

  if($format eq "short") {
    return sprintf('%04u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3]);
  }
  elsif($format eq "long") {
    return sprintf('%04u%02u%02uT%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1]);
  }
  elsif($format eq "long-iso") {
    $ts = sprintf('%04u%02u%02uT%02u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
  }
  elsif($format eq "debug-iso") {
    $ts = sprintf('%04u-%02u-%02uT%02u:%02u:%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
  }
  else { die "unsupported timestamp format: $format"; }

  # append UTC offset ("+hhmm" / "-hhmm")
  if($tm_is_utc) {
    $ts .= '+0000'; # or 'Z'
  } else {
    # derive the local timezone offset by interpreting @tm both as UTC and
    # as local time (reflects DST state at that point in time)
    my $offset = timegm(@tm) - timelocal(@tm);
    if($offset < 0) { $ts .= '-'; $offset = -$offset; } else { $ts .= '+'; }
    my $h = int($offset / (60 * 60));
    die if($h > 24); # sanity check, something went really wrong
    $ts .= sprintf('%02u%02u', $h, int($offset / 60) % 60);
  }
  return $ts;
}


# Print a formatted report header to stdout: title line, optional date,
# config file, dryrun marker, command line filters, info text, options
# and legend, framed by separator lines.
#
# %arg keys: title (required), time, config, info (arrayref),
#            options (arrayref), legend (arrayref), paragraph.
sub print_header(@)
{
  my %arg = @_;
  my $config = $arg{config};
  my $separator = "--------------------------------------------------------------------------------\n";

  print $separator;
  print "$arg{title} ($VERSION_INFO)\n\n";
  if($arg{time}) {
    my $date_text = localtime($arg{time});  # scalar context
    print "    Date:   $date_text\n";
  }
  print "    Config: " . config_key($config, "SRC_FILE") . "\n" if($config);
  print "    Dryrun: YES\n" if($dryrun);
  if($config && $config->{CMDLINE_FILTER_LIST}) {
    print "    Filter: " . join("\n            ", @{$config->{CMDLINE_FILTER_LIST}}) . "\n";
  }
  if($arg{info}) {
    print "\n" . join("\n", grep(defined, @{$arg{info}})) . "\n";
  }
  if($arg{options} && (scalar @{$arg{options}})) {
    print "\nOptions:\n    " . join("\n    ", @{$arg{options}}) . "\n";
  }
  if($arg{legend}) {
    print "\nLegend:\n    " . join("\n    ", @{$arg{legend}}) . "\n";
  }
  print $separator;
  print "\n" if($arg{paragraph});
}


# Print closing notes after a run: on non-zero exit status, an error hint
# plus any (de-duplicated) FIX_MANUALLY messages collected from the config
# tree; plus a reminder when dryrun was active.
sub print_footer($$)
{
  my ($config, $exit_status) = (shift, shift);
  if($exit_status) {
    print "\nNOTE: Some errors occurred, which may result in missing backups!\n";
    print "Please check warning and error messages above.\n";
    my %seen;
    my @fix_hints = grep { !$seen{$_}++ } _config_collect_values($config, "FIX_MANUALLY");
    print join("\n", @fix_hints) . "\n" if(scalar(@fix_hints));
  }
  print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n" if($dryrun);
}


# Print a simple two-column table to stdout.
#
# $rows: arrayref of [ left, right ] pairs; the left column is padded
#        with spaces to the width of its longest entry.
# $sep:  column separator (default: two spaces).
sub print_table($;$)
{
  my $rows = shift;
  my $sep = shift // "  ";
  my $width = 0;
  foreach my $row (@$rows) {
    my $len = length($row->[0]);
    $width = $len if($len > $width);
  }
  foreach my $row (@$rows) {
    print $row->[0], (' ' x ($width - length($row->[0]))), $sep, $row->[1], "\n";
  }
}


sub print_formatted(@)
{
  # Print tabular data in a configurable output format.
  #
  # $format_key: key into the global %table_formats, or a hashref with the
  #              same structure: { <format> => [ column keys ], RALIGN => { key => 1 } }.
  # $data:       arrayref of row hashrefs (column key => cell value;
  #              arrayref values are joined with ",").
  # %args:       title, output_format, pretty, no_header, outfile,
  #              empty_cell_char, paragraph.
  #
  # The effective format is $args{output_format} || global $output_format
  # || "table". A format string of the form "col:[h:]key1,key2[:R],..."
  # selects an explicit column list ("h:" suppresses the header,
  # ":R" / ":RALIGN" right-aligns a column).
  my $format_key = shift || die;
  my $data = shift || die;
  my $default_format = "table";
  my %args = @_;
  my $title = $args{title};
  my $table_format = ref($format_key) ? $format_key : $table_formats{$format_key};
  my $format = $args{output_format} || $output_format || $default_format;
  my $pretty = $args{pretty} // $output_pretty;
  my $no_header = $args{no_header};
  my $fh = $args{outfile} // *STDOUT;
  my $table_spacing = 2;  # spaces between table columns
  my $empty_cell_char = $args{empty_cell_char} // "-";

  my @keys;         # column keys, in display order
  my %ralign;       # columns to be right-aligned
  my %hide_column;  # "-"-prefixed columns with no defined value in any row
  if($format =~ s/^col:\s*(h:)?\s*//) {
    # explicit column list: $format now holds "key1,key2[:R],..."
    $no_header = 1 if($1);
    foreach (split(/\s*,\s*/, $format)) {
      # statement modifier: s/// runs first, so %ralign is keyed by the name
      # with the ":R" suffix stripped. NOTE(review): the %ralign key keeps
      # the original case while @keys are lowercased, so right-alignment
      # only takes effect for lowercase specs -- confirm this is intended.
      $ralign{$_} = 1 if s/:R(ALIGN)?$//i;
      push @keys, lc($_);
    }
  }
  else {
    unless(exists($table_format->{$format})) {
      WARN "Unsupported output format \"$format\", defaulting to \"$default_format\" format.";
      $format = $default_format;
    }
    @keys   = @{$table_format->{$format}};
    %ralign = %{$table_format->{RALIGN} // {}};
  }
  # strips leading "-" from @keys (the grep's s/// mutates @keys in place);
  # "-"-prefixed columns start out hidden, and are un-hidden below as soon
  # as any row defines a value for them
  %hide_column = map { $_ => 1 } grep { s/^-// } @keys;

  if($format eq "single_column")
  {
    # single-column: newline separated values, no headers
    my $key = $keys[0];
    foreach (grep defined, map $_->{$key}, @$data) {
      print $fh $_ . "\n" if($_ ne "");
    }
  }
  elsif($format eq "raw")
  {
    # output: key0="value0" key1="value1" ...
    foreach my $row (@$data) {
      print $fh "format=\"$format_key\" ";
      print $fh join(' ', map { "$_=\"" . ($row->{$_} // "") . "\""; } @keys) . "\n";
    }
  }
  elsif(($format eq "tlog") || ($format eq "syslog"))
  {
    # output: value0 value1, ...
    unless($no_header) {
      print $fh join(' ', map uc($_), @keys) . "\n"; # unaligned upper case headings
    }
    foreach my $row (@$data) {
      # undefined cells print as "-"; a defined "message" cell is prefixed with "# "
      my $line = join(' ', map { ((defined($row->{$_}) && ($_ eq "message")) ? '# ' : '') . ($row->{$_} // "-") } @keys);
      if($format eq "syslog") { # dirty hack, ignore outfile on syslog format
        syslog($line);
     } else {
        print $fh ($line . "\n");
      }
    }
  }
  else
  {
    # aligned table output
    # sanitize and calculate maxlen for each column
    my %maxlen = map { $_ => $no_header ? 0 : length($_) } @keys;
    my @formatted_data;
    foreach my $row (@$data) {
      my %formatted_row;
      foreach my $key (@keys) {
        my $val = $row->{$key};
        $val = join(',', @$val) if(ref $val eq "ARRAY");

        # any defined value un-hides a "-"-prefixed column
        $hide_column{$key} = 0 if(defined($val));
        $val = $empty_cell_char if(!defined($val) || ($val eq ""));
        $formatted_row{$key} = $val;
        $maxlen{$key} = length($val) if($maxlen{$key} < length($val));
      }
      push @formatted_data, \%formatted_row;
    }
    my @visible_keys = grep !$hide_column{$_}, @keys;

    # print title
    if($title) {
      print $fh "$title\n";
      print $fh '-' x length($title) . "\n"; # separator line
    }

    # print keys (headings); $fill defers each column's padding until the
    # next column is printed, so the last column has no trailing spaces
    unless($no_header) {
      my $fill = 0;
      foreach (@visible_keys) {
        print $fh ' ' x $fill;
        $fill = $maxlen{$_} - length($_);
        if($pretty) {
           # use aligned lower case headings (with separator line below)
          if($ralign{$_}) {
            print $fh ' ' x $fill;
            $fill = 0;
          }
          print $fh $_;
        } else {
          print $fh uc($_); # default unaligned upper case headings
        }
        $fill += $table_spacing;
      }
      print $fh "\n";

      $fill = 0;
      if($pretty) { # separator line after header
        foreach (@visible_keys) {
          print $fh ' ' x $fill;
          print $fh '-' x $maxlen{$_};
          $fill = $table_spacing;
        }
        print $fh "\n";
        # alternative (all above in one line ;)
        #print $fh join(' ' x $table_spacing, map { '-' x ($maxlen{$_}) } @keys) . "\n";
      }
    }

    # print values (same deferred-padding scheme as the headings above)
    foreach my $row (@formatted_data) {
      my $fill = 0;
      foreach (@visible_keys) {
        my $val = $row->{$_};
        print $fh ' ' x $fill;
        $fill = $maxlen{$_} - length($val);
        if($ralign{$_}) {
          print $fh ' ' x $fill;
          $fill = 0;
        }
        print $fh $val;
        $fill += $table_spacing;
      }
      print $fh "\n";
    }

    # print additional newline for paragraphs
    if($args{paragraph}) {
      print $fh "\n";
    }
  }
}


# Format a byte count for display: raw number in "raw" output format,
# "-" for zero, otherwise scaled to the global @output_unit if set, or
# auto-scaled to KiB/MiB/GiB/TiB with two decimal places.
sub print_size($)
{
  my $bytes = shift;
  return $bytes if($output_format && ($output_format eq "raw"));
  return "-" if($bytes == 0);
  my ($unit, $div);
  if(@output_unit) {
    # fixed unit selected on the command line (--bytes, --kbytes, ...)
    ($unit, $div) = @output_unit;
  }
  else {
    # auto-scale: step up while the value exceeds the next unit
    ($unit, $div) = ("KiB", 1024);
    foreach my $next_unit (qw( MiB GiB TiB )) {
      last unless($bytes > $div * 1024);
      ($unit, $div) = ($next_unit, $div * 1024);
    }
  }
  return $bytes if($div == 1);
  return sprintf('%.2f', ($bytes / $div)) . " $unit";
}


# Recursively assemble the "origin tree" of a subvolume node as indented
# text lines pushed onto @$lines: the chain of snapshot parents
# (parent_uuid, drawn with "^-- ") and send/receive relations
# (received_uuid: receive parents and "twins" sharing the same source).
#
# $prefix:    tree-drawing prefix for this node ("^-- "), "" for top nodes
# $node:      subvolume node hashref (from global %uuid_cache), required
# $lines:     arrayref; one hashref (tree/uuid/parent_uuid/received_uuid)
#             is appended per printed line
# $nodelist:  sorted list of all known nodes (built from %uuid_cache on
#             first call, passed down to avoid re-sorting)
# $depth:     current indentation depth (4 spaces per level)
# $seen:      arrayref of uuids already expanded (recursion guard)
# $norecurse: if true, print this node's parent as "..." and stop
#
# Always returns 0.
sub _origin_tree
{
  my $prefix = shift;
  my $node = shift // die;
  my $lines = shift;
  my $nodelist = shift;
  my $depth = shift // 0;
  my $seen = shift // [];
  my $norecurse = shift;
  my $uuid = $node->{uuid} || die;

  # cache a bit, this might be large
  # note: root subvolumes dont have REL_PATH
  $nodelist //= [ (sort { ($a->{REL_PATH} // "") cmp ($b->{REL_PATH} // "") } values %uuid_cache) ];

  my $prefix_spaces = ' ' x (($depth * 4) - ($prefix ? 4 : 0));
  push(@$lines, { tree          => "${prefix_spaces}${prefix}" . _fs_path($node),
                  uuid          => $node->{uuid},
                  parent_uuid   => $node->{parent_uuid},
                  received_uuid => $node->{received_uuid},
                });

  # handle deep recursion: stop if this uuid was already expanded.
  # NOTE: plain string comparison instead of the former regex match
  # (grep /^$uuid$/): the uuid is data, not a pattern.
  return 0 if(grep { $_ eq $uuid } @$seen);

  if($node->{parent_uuid} ne '-') {
    my $parent_node = $uuid_cache{$node->{parent_uuid}};
    if($parent_node) {
      if($norecurse) {
        # caller requested a shallow tree: mark the parent chain as truncated
        push(@$lines,{ tree          => "${prefix_spaces}    ^-- ...",
                       uuid          => $parent_node->{uuid},
                       parent_uuid   => $parent_node->{parent_uuid},
                       received_uuid => $parent_node->{received_uuid},
                       recursion     => 'stop_recursion',
                     });
        return 0;
      }
      if($parent_node->{readonly}) {
        _origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1, undef, 1);  # end recursion
      }
      else {
        _origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1);
      }
    }
    else {
      push(@$lines,{ tree => "${prefix_spaces}    ^-- <unknown uuid=$node->{parent_uuid}>" });
    }
  }

  return 0 if($norecurse);
  push(@$seen, $uuid);

  if($node->{received_uuid} ne '-') {
    my $received_uuid = $node->{received_uuid};
    my @receive_parents; # there should be only one!
    my @receive_twins;

    foreach (@$nodelist) {
      next if($_->{uuid} eq $uuid);
      if($received_uuid eq $_->{uuid} && $_->{readonly}) {
        # the subvolume this node was received from
        _origin_tree("", $_, \@receive_parents, $nodelist, $depth, $seen);
      }
      elsif(($_->{received_uuid} ne '-') && ($received_uuid eq $_->{received_uuid}) && $_->{readonly}) {
        # "twin": another subvolume received from the same source
        _origin_tree("", $_, \@receive_twins, $nodelist, $depth, $seen, 1);  # end recursion
      }
    }
    push @$lines, @receive_twins;
    push @$lines, @receive_parents;
  }

  return 0;
}


# Walk the config tree and return 10 if any subsection was aborted or
# carries a FIX_MANUALLY message (treated as errors), 0 otherwise.
sub exit_status
{
  my $section = shift;
  foreach my $sub (@{$section->{SUBSECTION}}) {
    return 10 if(IS_ABORTED($sub, "abort_")
                 || defined($sub->{FIX_MANUALLY})
                 || exit_status($sub));
  }
  return 0;
}



MAIN:
{
  # NOTE: Since v0.26.0, btrbk does not enable taint mode (perl -T) by
  # default, and does not hardcode $PATH anymore.
  #
  # btrbk still does all taint checks, and can be run in taint mode.
  # In order to enable taint mode, run `perl -T btrbk`.
  #
  # see: perlrun(1), perlsec(1)
  #
  my $taint_mode_enabled = eval '${^TAINT}';
  if($taint_mode_enabled) {
    # we are running in tainted mode (perl -T), sanitize %ENV
    delete @ENV{qw(IFS CDPATH ENV BASH_ENV)};

    # in taint mode, perl needs an untainted $PATH.
    $ENV{PATH} = '/sbin:/bin:/usr/sbin:/usr/bin';
  }

  Getopt::Long::Configure qw(gnu_getopt);
  my $start_time = time;
  @tm_now = localtime($start_time);

  my @config_override_cmdline;
  my @exclude_cmdline;
  my ($config_cmdline, $lockfile_cmdline, $print_schedule,
      $preserve_snapshots, $preserve_backups, $wipe_snapshots, $skip_snapshots, $skip_backups,
      $archive_raw, $extents_related,
      $resume_only_DEPRECATED,  # as of btrbk-v0.26.0
     );

  # Calling btrbk via "lsbtr" symlink acts as an alias for "btrbk ls",
  # while also changing the semantics of the command line options.
  $program_name = $0;
  $program_name =~ s/^.*\///; # remove path
  my @getopt_options = (
    # common options
    'help|h'             => sub { VERSION_MESSAGE(); HELP_MESSAGE(0); exit 0; },
    'version'            => sub { VERSION_MESSAGE(); exit 0; },
    'quiet|q'            => \$quiet,
    'verbose|v'          => sub { $loglevel = ($loglevel =~ /^[0-9]+$/) ? $loglevel+1 : 2; },
    'loglevel|l=s'       => \$loglevel,
    'format=s'           => \$output_format,
    'single-column|1'    => sub { $output_format = "single_column" },
    'pretty'             => \$output_pretty,
    'config|c=s'         => \$config_cmdline,
    'override=s'         => \@config_override_cmdline,  # e.g. --override=incremental=no
    'lockfile=s'         => \$lockfile_cmdline,
   );
  push @getopt_options, ($program_name eq "lsbtr") ? (
    # "lsbtr" options
    'long|l'             => sub { $output_format = "table" },
    'uuid|u'             => sub { $output_format = "long" },
    'raw'                => sub { $output_format = "raw" },
   ) : (
     # "btrbk" options
    'dry-run|n'          => \$dryrun,
    'exclude=s'          => \@exclude_cmdline,
    'preserve|p'         => sub { $preserve_snapshots = "preserve", $preserve_backups = "preserve" },
    'preserve-snapshots' => sub { $preserve_snapshots = "preserve-snapshots" },
    'preserve-backups'   => sub { $preserve_backups = "preserve-backups" },
    'wipe'               => \$wipe_snapshots,
    'resume-only|r'      => \$resume_only_DEPRECATED,
    'progress'           => \$show_progress,
    'related'            => \$extents_related,
    'table|t'            => sub { $output_format = "table" },
    'long|L'             => sub { $output_format = "long" },
    'print-schedule|S'   => \$print_schedule,
    'raw'                => \$archive_raw,
    'bytes'              => sub { @output_unit = ("",    1                         ) },
    'kbytes'             => sub { @output_unit = ("KiB", 1024                      ) },
    'mbytes'             => sub { @output_unit = ("MiB", 1024 * 1024               ) },
    'gbytes'             => sub { @output_unit = ("GiB", 1024 * 1024 * 1024        ) },
    'tbytes'             => sub { @output_unit = ("TiB", 1024 * 1024 * 1024 * 1024 ) },
   );
  unless(GetOptions(@getopt_options)) {
    VERSION_MESSAGE();
    HELP_MESSAGE(0);
    exit 2;
  }
  if($program_name eq "lsbtr") {
    unshift @ARGV, './' unless(@ARGV); # default to current path
    unshift @ARGV, "ls"; # implicit "btrbk ls"
  }
  my $command = shift @ARGV;
  unless($command) {
    VERSION_MESSAGE();
    HELP_MESSAGE(0);
    exit 2;
  }

  # assign command line options
  @config_src = ( $config_cmdline ) if($config_cmdline);
  $loglevel = { error => 0, warn => 1, warning => 1, info => 2, debug => 3, trace => 4 }->{$loglevel} // $loglevel;
  unless($loglevel =~ /^[0-9]+$/) {
    ERROR "Unknown loglevel: $loglevel";
    HELP_MESSAGE(0);
    exit 2;
  }
  $do_trace = 1 if($loglevel >= 4);
  require_data_dumper() if($do_trace || ($VERSION =~ /-dev$/));

  # DEPRECATED options
  if($resume_only_DEPRECATED) {
    WARN "Found deprecated command line option \"-r, --resume-only\": Use \"btrbk resume --preserve\"";
    $skip_snapshots = "resume-only";
    $preserve_backups = "resume-only";
    $preserve_snapshots = "resume-only";
  }

  # check command line options
  if($show_progress && (not check_exe('mbuffer'))) {
    WARN 'Found option "--progress", but required executable "mbuffer" does not exist on your system. Please install "mbuffer".';
    $show_progress = 0;
  }
  my ($action_run, $action_usage, $action_resolve, $action_diff, $action_extents, $action_origin, $action_config_print, $action_list, $action_clean, $action_archive, $action_ls);
  my @filter_args;
  my @subvol_args;
  my $args_expected_min = 0;
  my $args_expected_max = 9999;
  my $fallback_default_config;
  my $subvol_args_allow_relative;
  my $subvol_args_init;
  if(($command eq "run") || ($command eq "dryrun")) {
    $action_run = 1;
    $dryrun = 1 if($command eq "dryrun");
    @filter_args = @ARGV;
  }
  elsif($command eq "snapshot") {
    $action_run = 1;
    $skip_backups = "snapshot";
    $preserve_backups = "snapshot";
    @filter_args = @ARGV;
  }
  elsif($command eq "resume") {
    $action_run = 1;
    $skip_snapshots = "resume";
    @filter_args = @ARGV;
  }
  elsif($command eq "prune") {
    $action_run = 1;
    $skip_snapshots = "prune";
    $skip_backups = "prune";
    @filter_args = @ARGV;
  }
  elsif ($command eq "clean") {
    $action_clean = 1;
    @filter_args = @ARGV;
  }
  elsif ($command eq "archive") {
    $action_archive = 1;
    $fallback_default_config = 1;
    $args_expected_min = $args_expected_max = 2;
    $subvol_args_allow_relative = 1;
    @subvol_args = @ARGV;
  }
  elsif ($command eq "usage") {
    $action_usage = 1;
    @filter_args = @ARGV;
  }
  elsif ($command eq "ls") {
    $action_ls = 1;
    $fallback_default_config = 1;
    $args_expected_min = 1;
    $subvol_args_allow_relative = 1;
    @subvol_args = @ARGV;
  }
  elsif ($command eq "diff") {
    $action_diff = 1;
    $fallback_default_config = 1;
    $args_expected_min = $args_expected_max = 2;
    $subvol_args_init = "restrict_same_fs deny_root_subvol";
    $subvol_args_allow_relative = 1;
    @subvol_args = @ARGV;
  }
  elsif ($command eq "extents") {
    my $subcommand = shift @ARGV // "";
    if(($subcommand eq "list") ||
       ($subcommand eq "diff")) {
      $action_extents = $subcommand;
    }
    else { # defaults to "list"
      unshift @ARGV, $subcommand;
      $action_extents = "list";
    }
    $fallback_default_config = 1;
    $args_expected_min = 1;
    $subvol_args_init = "restrict_same_fs";
    $subvol_args_allow_relative = 1;
    my $excl;
    foreach(@ARGV) {
      # subvol_arg... "exclusive" filter_arg...
      if($_ eq "exclusive") {
        $excl = 1;
      } else {
        push @subvol_args, $_;
        push @filter_args, $_ if($excl);
      }
    }
  }
  elsif ($command eq "origin") {
    $action_origin = 1;
    $args_expected_min = $args_expected_max = 1;
    $subvol_args_init = "deny_root_subvol";
    $subvol_args_allow_relative = 1;
    @subvol_args = @ARGV;
  }
  elsif($command eq "list") {
    my $subcommand = shift @ARGV // "";
    if(($subcommand eq "config") ||
       ($subcommand eq "volume") ||
       ($subcommand eq "source") ||
       ($subcommand eq "target"))
    {
      $action_list = $subcommand;
    }
    elsif(($subcommand eq "all") ||
          ($subcommand eq "snapshots") ||
          ($subcommand eq "backups") ||
          ($subcommand eq "latest"))
    {
      $action_resolve = $subcommand;
    }
    else {
      $action_resolve = "all";
      unshift @ARGV, $subcommand if($subcommand ne "");
    }
    @filter_args = @ARGV;
  }
  elsif($command eq "stats") {
    $action_resolve = "stats";
    @filter_args = @ARGV;
  }
  elsif ($command eq "config") {
    my $subcommand = shift @ARGV // "";
    @filter_args = @ARGV;
    if(($subcommand eq "print") || ($subcommand eq "print-all")) {
      $action_config_print = $subcommand;
    }
    elsif($subcommand eq "list") {
      $action_list = "config";
    }
    else {
      ERROR "Unknown subcommand for \"config\" command: $subcommand";
      HELP_MESSAGE(0);
      exit 2;
    }
  }
  else {
    ERROR "Unrecognized command: $command";
    HELP_MESSAGE(0);
    exit 2;
  }
  if(($args_expected_min > scalar(@ARGV)) || ($args_expected_max < scalar(@ARGV))) {
    ERROR "Incorrect number of arguments";
    HELP_MESSAGE(0);
    exit 2;
  }

  # input validation
  foreach (@subvol_args) {
    my ($url_prefix, $path) = check_url($_);
    if(!defined($path) && $subvol_args_allow_relative && ($url_prefix eq "")) {
      # map relative path to absolute
      if((-d $_) && ($_ =~ /^(.*)$/)) { # untaint ANY argument!
        $path = check_file(`readlink -f -q '$1'`, { absolute => 1 })
      }
    }
    unless(defined($path)) {
      ERROR "Bad argument: not a subvolume declaration: $_";
      HELP_MESSAGE(0);
      exit 2;
    }
    $_ = $url_prefix . $path;
  }
  my @filter_vf;
  foreach (@filter_args) {
    my $vf = vinfo_filter_statement($_);
    unless($vf) {
      ERROR "Bad argument: invalid filter statement: $_";
      HELP_MESSAGE(0);
      exit 2;
    }
    push @filter_vf, $vf;
  }
  foreach (@exclude_cmdline) {
    my $vf = vinfo_filter_statement($_);
    unless($vf) {
      ERROR "Bad argument: invalid filter statement: --exclude='$_'";
      HELP_MESSAGE(0);
      exit 2;
    }
    push @exclude_vf, $vf;
  }
  foreach(@config_override_cmdline) {
    if(/(.*?)=(.*)/) {
      my $key = $1;
      my $value = $2;
      unless(append_config_option(\%config_override, $key, $value, "OVERRIDE", error_statement => "in option \"--override\"")) {
        HELP_MESSAGE(0);
        exit 2;
      }
    }
    else {
      ERROR "Option \"override\" requires \"<config_option>=<value>\" format";
      HELP_MESSAGE(0);
      exit 2;
    }
  }
  if(defined($lockfile_cmdline)) {
    if($lockfile_cmdline =~ /^($file_match)$/) {
      $lockfile = $1; # untaint argument
    } else {
      ERROR "Option \"--lockfile\" is not a valid file name: \"$lockfile_cmdline\"";
      HELP_MESSAGE(0);
      exit 2;
    }
  }


  INFO "$VERSION_INFO  (" . localtime($start_time) . ")";
  action("startup", status => "v$VERSION", message => $VERSION_INFO, time => $start_time);


  #
  # parse config file
  #
  my $config;
  if(my $config_file = _config_file(@config_src)) {
    INFO "Using configuration: $config_file";
    $config = parse_config($config_file);
    exit 2 unless($config);
  }
  elsif($fallback_default_config) {
    INFO "Configuration file not found, falling back to defaults";
    $config = init_config();
  }
  else {
    ERROR "Configuration file not found: " . join(', ', @config_src);
    exit 2;
  }

  unless(ref($config->{SUBSECTION}) eq "ARRAY") {
    ERROR "No volumes defined in configuration file";
    exit 2;
  }

  # input validation (part 2, after config is initialized)
  @subvol_args = map { vinfo($_, $config) } @subvol_args;
  if($subvol_args_init) {
    foreach(@subvol_args) {
      unless(vinfo_init_root($_)) {
        ERROR "Failed to fetch subvolume detail for '$_->{PRINT}'" , @stderr;
        exit 1;
      }
      if(defined($_->{NODE_SUBDIR})) {
        ERROR "Argument is not a subvolume: $_->{PATH}";
        exit 1;
      }
      if(($subvol_args_init =~ /deny_root_subvol/) && $_->{node}{is_root}) {
        ERROR "Subvolume is btrfs root: $_->{PATH}";
        exit 1;
      }
      if(($subvol_args_init =~ /restrict_same_fs/) && (not _is_same_fs_tree($subvol_args[0]->{node}, $_->{node}))) {
        ERROR "Subvolumes are not on the same btrfs filesystem!";
        exit 1;
      }
    }
  }


  if($action_diff)
  {
    #
    # print snapshot diff (btrfs find-new)
    #
    my $src_vol = $subvol_args[0];
    my $target_vol = $subvol_args[1];

    # NOTE: in some cases "cgen" differs from "gen", even for read-only snapshots (observed: gen=cgen+1)
    my $lastgen = $src_vol->{node}{gen} + 1;

    # dump files, sorted and unique
    my $ret = btrfs_subvolume_find_new($target_vol, $lastgen);
    exit 1 unless(ref($ret));

    INFO "Listing changed files for subvolume: $target_vol->{PRINT}  (gen=$target_vol->{node}{gen})";
    INFO "Starting at generation after subvolume: $src_vol->{PRINT}  (gen=$src_vol->{node}{gen})";
    INFO "Listing files modified within generation range: [$lastgen..$target_vol->{node}{gen}]";
    DEBUG "Newest file generation (transid marker) was: $ret->{transid_marker}";

    my $files = $ret->{files};
    my $total_len = 0;
    my @data;
    foreach my $name (sort keys %$files) {
      my $finfo = $files->{$name};
      $total_len += $finfo->{len};
      push @data, {
        flags => ($finfo->{new}               ? '+' : '.') .
                 ($finfo->{flags}->{COMPRESS} ? 'c' : '.') .
                 ($finfo->{flags}->{INLINE}   ? 'i' : '.'),
        count => scalar(keys(%{$finfo->{gen}})),
        size => print_size($finfo->{len}),
        file => $name,
       };
    }

    my $raw = ($output_format && $output_format eq "raw");
    print_formatted("diff", \@data, paragraph => 1);
    print "Total size: " . print_size($total_len) . "\n" unless($raw);
    exit 0;
  }


  if($action_extents)
  {
    #
    # print extents diff (filefrag)
    #

    # check system requirements
    my $extentmap_fn;
    if($dryrun) {
      $extentmap_fn = sub {
        INFO("Fetching extent information (dryrun) for: $_[0]->{PRINT}");
        return undef;
      };
    }
    elsif(eval_quiet { require IO::AIO; }) {
      # this is slightly faster (multithreaded) than filefrag
      $extentmap_fn=\&aio_extentmap;
    }
    elsif(check_exe("filefrag")) {
      INFO "IO::AIO module not present, falling back to 'filefrag' (slower)";
      $extentmap_fn=\&filefrag_extentmap;
    }
    else {
      ERROR 'Please install either "IO::AIO" perl module or "filefrag" (from e2fsprogs package)';
      exit 1;
    }
    INFO "Extent map caching disabled (consider setting \"cache_dir\" configuration option)" unless(config_key($config, 'cache_dir'));

    # resolve related subvolumes
    my @resolved_vol;
    if($extents_related) {
      # add all related subvolumes
      foreach my $svol (@subvol_args) {
        my $svol_gen = $svol->{node}{readonly} ? $svol->{node}{cgen} : $svol->{node}{gen};
        my @related = map({ vinfo_resolved_all_mountpoints($_, $svol->{VINFO_MOUNTPOINT}) // () }
                          _related_nodes($svol->{node})); # includes $svol
        push @resolved_vol, @related;
      }
    }
    else {
      @resolved_vol = @subvol_args;
    }

    my @data;
    # print results on ctrl-c
    $SIG{INT} = sub {
      print STDERR "\nERROR: Caught SIGINT, dumping incomplete list:\n";
      print_formatted("extent_diff", \@data);
      exit 1;
    };

    my $do_diff = ($action_extents eq "diff");
    my $prev_data;
    # sort by gen for r/w subvolumes, cgen on readonly subvolumes, as
    # "gen" is increased on readonly subvolume when snapshotted.
    # crawl descending, but display ascending (unshift):
    # Iterate candidate subvolumes newest-first: sort descending by
    # generation, using commit generation (cgen) for readonly snapshots
    # and current generation (gen) otherwise.
    foreach my $vol (sort { ($b->{node}{readonly} ? $b->{node}{cgen} : $b->{node}{gen}) <=>
                            ($a->{node}{readonly} ? $a->{node}{cgen} : $a->{node}{gen}) }
                     @resolved_vol) {
      # Skip entries resolving to the same subvolume id as the previous one.
      if($prev_data && ($prev_data->{_vinfo}{node}{id} == $vol->{node}{id})) {
        INFO "Skipping duplicate of \"$prev_data->{_vinfo}{PRINT}\": $vol->{PRINT}";
        next;
      }

      # read extent map (cached if available, else compute and write cache)
      if($vol->{EXTENTMAP} = read_extentmap_cache($vol)) {
        INFO "Using cached extent map: $vol->{PRINT}";
      } else {
        $vol->{EXTENTMAP} = $extentmap_fn->($vol);
        write_extentmap_cache($vol);
      }
      next unless($vol->{EXTENTMAP});

      # Fill in the diff size of the previously processed (newer) entry:
      # extents present in the newer map but not in this (older) one.
      if($do_diff && $prev_data) {
        my $diff_map = extentmap_diff($prev_data->{_vinfo}{EXTENTMAP}, $vol->{EXTENTMAP});
        $prev_data->{diff} = print_size(extentmap_size($diff_map));
      }
      $prev_data = {
        %{$vol->{node}}, # copy node
        total  => print_size(extentmap_size($vol->{EXTENTMAP})),
        subvol => $vol->{PRINT},
        _vinfo => $vol,
      };
      # unshift so @data ends up oldest-first (reverse of iteration order)
      unshift @data, $prev_data;
    }

    my @universe_set = map $_->{_vinfo}{EXTENTMAP}, @data;
    unless(scalar(@universe_set)) {
      ERROR "No extent map data, exiting";
      # NOTE(review): "exit -1" yields exit status 255 on POSIX shells
      exit -1;
    }

    my @summary;
    INFO "Calculating union of " . scalar(@data) . " subvolumes";
    push @summary, {
      a => "Union (" . scalar(@data) . " subvolumes):",
      b => print_size(extentmap_size(extentmap_merge(@universe_set)))
     };

    # Set-exclusive size per subvolume: extents found in this map only
    # (diff against the union of all other maps).
    INFO "Calculating set-exclusive size for " . scalar(@data) . " subvolumes";
    foreach my $d (@data) {
      my $vol = $d->{_vinfo};
      DEBUG "Calculating exclusive for: $vol->{PRINT}";
      my @others = grep { $_ != $vol->{EXTENTMAP} } @universe_set;
      # NOTE(review): statement below ends with "," (valid trailing list
      # comma in Perl, behaves like ";" here) — ";" presumably intended
      $d->{exclusive} = print_size(extentmap_size(extentmap_diff($vol->{EXTENTMAP}, extentmap_merge(@others)))),
    }

    # If filter arguments are present, additionally report the data
    # exclusive to the matching set X against the non-matching set A.
    if(scalar(@filter_vf)) {
      INFO "Calculating set difference (X \\ A)";
      my @excl;
      my @others;
      foreach(@data) {
        if(vinfo_match(\@filter_vf, $_->{_vinfo})) {
          $_->{set} = "X";
          push @excl, $_->{_vinfo}{EXTENTMAP};
        } else {
          $_->{set} = "A";
          push @others, $_->{_vinfo}{EXTENTMAP};
        }
      }
      push @summary, {
        a => "Exclusive data ( X \\ A ):",
        b => print_size(extentmap_size(extentmap_diff(extentmap_merge(@excl), extentmap_merge(@others)))),
      };
    }
    # Without --diff the generation order is irrelevant; sort by name.
    unless($do_diff) {
      @data = sort { $a->{subvol} cmp $b->{subvol} } @data;
    }

    INFO "Printing extents map set difference: (extents \\ extents-on-prev-line)";
    print_formatted("extent_diff", \@data, paragraph => 1);
    print_formatted({ table => [ qw( a b ) ], RALIGN => { b=>1 } },
                    \@summary, output_format => "table", no_header => 1);
    exit 0;
  }


  if($action_ls)
  {
    #
    # print accessible subvolumes for local path
    #
    my $exit_status = 0;
    my %data_uniq;  # keyed by PRINT path, deduplicates across arguments
    foreach my $root_vol (@subvol_args) {
      # map url to real path (we need to match against mount points below)
      my $root_path = system_realpath($root_vol);
      unless($root_path) {
        ERROR "Cannot find real path for: $root_vol->{PATH}", @stderr;
        $exit_status = 1;
        next;
      }
      $root_vol = vinfo($root_vol->{URL_PREFIX} . $root_path, $config);
      $root_path .= '/' unless($root_path =~ /\/$/);  # append trailing slash
      INFO "Listing subvolumes for directory: $root_vol->{PRINT}";

      # fetch mount info once per machine (cached)
      my $mountinfo = $mountinfo_cache{$root_vol->{MACHINE_ID}};
      unless($mountinfo) {
        $mountinfo = system_list_mountinfo($root_vol);
        unless($mountinfo) {
          $exit_status = 1;
          next;
        }
        $mountinfo_cache{$root_vol->{MACHINE_ID}} = $mountinfo;
      }

      # Walk mount points in reverse order (later mounts shadow earlier
      # ones); @mnt_path_hidden collects paths shadowed by other mounts.
      my @mnt_path_hidden;
      foreach my $mnt (reverse @$mountinfo) {
        my $mnt_path = $mnt->{mount_point};
        $mnt_path .= '/' unless($mnt_path =~ /\/$/);  # append trailing slash

        # consider btrfs mounts that contain, or are contained in, root_path
        if(($mnt->{fs_type} eq "btrfs") &&
           (($root_path =~ /^\Q$mnt_path\E/) || ($mnt_path =~ /^\Q$root_path\E/)))
        {
          unless(defined(check_file($mnt->{mount_point}, { absolute => 1}))) {
            ERROR "Ignoring btrfs mount point (unsupported file name): $mnt->{mount_point}";
            $exit_status = 1;
            next;
          }

          # we know those are real paths, prevents calling readlink in btrfs_mountpoint
          $realpath_cache{$root_vol->{URL_PREFIX} . $mnt->{mount_point}} = $mnt->{mount_point};

          my $vol = vinfo($root_vol->{URL_PREFIX} . $mnt->{mount_point}, $config);
          DEBUG "Processing btrfs mount point: $mnt_path";

          unless(vinfo_init_root($vol)) {
            ERROR "Failed to fetch subvolume detail for: $vol->{PRINT}", @stderr;
            $exit_status = 1;
            next;
          }

          # collect the mount point itself plus all its subvolumes
          my $subvol_list = vinfo_subvol_list($vol);
          my $count_added = 0;
          foreach my $svol ($vol, @$subvol_list) {
            my $svol_path = $svol->{PATH};
            $svol_path =~ s/^\/\//\//;  # sanitize "//" (see vinfo_child)

            my $svol_path_ts = $svol_path . ($svol_path =~ /\/$/ ? "" : "/"); # append trailing slash
            # only list subvolumes below the requested directory
            next unless($svol_path_ts =~ /^\Q$root_path\E/);
            if(grep { $svol_path_ts =~ /^\Q$_\E/ } @mnt_path_hidden) {
              DEBUG "subvolume is hidden by another mount point: $svol->{PRINT}";
              next;
            }

            $data_uniq{$svol->{PRINT}} = {
              %{$svol->{node}}, # copy node
              top => $svol->{node}{top_level}, # alias (narrow column)
              mount_point => $svol->{VINFO_MOUNTPOINT}{PATH},
              mount_source => $svol->{node}{TREE_ROOT}{mount_source},
              mount_subvolid => $mnt->{MNTOPS}{subvolid},
              mount_subvol => $mnt->{MNTOPS}{subvol},
              subvolume_path => $svol->{node}{path},
              subvolume_rel_path => $svol->{node}{REL_PATH},
              url  => $svol->{URL},
              host => $svol->{HOST},
              path => $svol_path,
              flags => ($svol->{node}{readonly} ? "readonly" : undef),
            };
            $count_added++;
          }
          DEBUG "Listing $count_added/" . (scalar(@$subvol_list) + 1) . " subvolumes for btrfs mount: $vol->{PRINT}";
        }
        else {
          TRACE "Skipping mount point: $mnt_path (fs_type=$mnt->{fs_type})" if($do_trace);
        }

        # stop once we reach the mount containing root_path; everything
        # before it in the (reversed) list shadows paths below it
        last if($root_path =~ /^\Q$mnt_path\E/);
        push @mnt_path_hidden, $mnt_path;
      }
    }

    # sort by host, then mount point, then path; print deduplicated rows
    my @sorted = sort { (($a->{host} // "") cmp ($b->{host} // "")) ||
                        ($a->{mount_point} cmp $b->{mount_point}) ||
                        ($a->{path} cmp $b->{path})
                      } values %data_uniq;
    $output_format ||= "short";
    print_formatted("fs_list", \@sorted, no_header => !scalar(@sorted));

    exit $exit_status;
  }


  #
  # try exclusive lock if set in config or command-line option
  #
  $lockfile //= config_key($config, "lockfile");
  if(defined($lockfile) && (not $dryrun)) {
    # Three-arg open: the two-arg form ">>$lockfile" would let special
    # characters in $lockfile alter the open mode (mode injection).
    # The bareword (global) handle is deliberate: the handle must stay
    # open (and the flock held) for the lifetime of the process.
    unless(open(LOCKFILE, '>>', $lockfile)) {
      # NOTE: the lockfile is never deleted by design
      ERROR "Failed to open lock file '$lockfile': $!";
      exit 3;
    }
    unless(flock(LOCKFILE, 6)) {  #  exclusive, non-blocking (LOCK_EX | LOCK_NB)
      ERROR "Failed to take lock (another btrbk instance is running): $lockfile";
      exit 3;
    }
  }


  if($action_archive)
  {
    #
    # archive (clone) tree
    #
    # NOTE: This is intended to work without a config file! The only
    # thing used from the configuration is the SSH and transaction log
    # stuff.
    #
    init_transaction_log(config_key($config, "transaction_log"),
                         config_key($config, "transaction_syslog"));

    # positional arguments: <source> <target>
    my $src_root     = $subvol_args[0] || die;
    my $archive_root = $subvol_args[1] || die;

    # FIXME: add command line options for preserve logic
    $config->{SUBSECTION} = [];  # clear configured subsections, we build them dynamically

    unless(vinfo_init_root($src_root)) {
      ERROR "Failed to fetch subvolume detail for '$src_root->{PRINT}'", @stderr;
      exit 1;
    }
    unless($archive_raw ? vinfo_init_raw_root($archive_root) : vinfo_init_root($archive_root)) {
      ERROR "Failed to fetch " . ($archive_raw ? "raw target metadata" : "subvolume detail") . " for '$archive_root->{PRINT}'", @stderr;
      exit 1;
    }

    # Build one archive_source/archive_target config pair per distinct
    # "<subdir>/<btrbk basename>" found among readonly btrbk snapshots,
    # shallowest subtree first.
    my %name_uniq;
    my @subvol_list = @{vinfo_subvol_list($src_root)};
    my @sorted = sort { ($a->{subtree_depth} <=> $b->{subtree_depth}) || ($a->{SUBVOL_DIR} cmp $b->{SUBVOL_DIR}) } @subvol_list;
    foreach my $vol (@sorted) {
      next unless($vol->{node}{readonly});
      my $snapshot_name = $vol->{node}{BTRBK_BASENAME};
      unless(defined($snapshot_name)) {
        WARN "Skipping subvolume (not a btrbk subvolume): $vol->{PRINT}";
        next;
      }
      my $subvol_dir = $vol->{SUBVOL_DIR};
      # process each <subdir>/<basename> combination only once
      next if($name_uniq{"$subvol_dir/$snapshot_name"});
      $name_uniq{"$subvol_dir/$snapshot_name"} = 1;
      my $droot_url = $archive_root->{URL} . ($subvol_dir eq "" ? "" : "/$subvol_dir");
      my $sroot_url = $src_root->{URL} . ($subvol_dir eq "" ? "" : "/$subvol_dir");
      my $config_sroot = { CONTEXT       => "archive_source",
                           PARENT        => $config,
                           url           => $sroot_url,     # ABORTED() needs this
                           snapshot_name => $snapshot_name,
                         };
      my $config_droot = { CONTEXT       => "archive_target",
                           PARENT        => $config_sroot,
                           target_type   => ($archive_raw ? "raw" : "send-receive"), # macro_send_receive checks this
                           url           => $droot_url,     # ABORTED() needs this
                         };
      $config_sroot->{SUBSECTION} = [ $config_droot ];
      push(@{$config->{SUBSECTION}}, $config_sroot);

      my $sroot = vinfo($sroot_url, $config_sroot);
      vinfo_assign_config($sroot);
      unless(vinfo_init_root($sroot)) {
        ABORTED($sroot, "Failed to fetch subvolume detail");
        WARN "Skipping archive source \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot), @stderr;
        next;
      }

      my $droot = vinfo($droot_url, $config_droot);
      vinfo_assign_config($droot);
      # If the target subdirectory does not resolve yet, create it and
      # retry (faked on dryrun, since nothing was actually created).
      unless($archive_raw ? vinfo_init_raw_root($droot) : vinfo_init_root($droot)) {
        DEBUG "Failed to fetch " . ($archive_raw ? "raw target metadata" : "subvolume detail") . " for '$droot->{PRINT}'";
        unless(system_mkdir($droot)) {
          ABORTED($droot, "Failed to create directory: $droot->{PRINT}/");
          WARN "Skipping archive target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot), @stderr;
          next;
        }
        $droot->{SUBDIR_CREATED} = 1;
        if($dryrun) {
          # we need to fake this directory on dryrun
          $droot->{node} = $archive_root->{node};
          $droot->{NODE_SUBDIR} = $subvol_dir;
          $droot->{VINFO_MOUNTPOINT} = $archive_root->{VINFO_MOUNTPOINT};
        }
        else {
          # after directory is created, try to init again
          unless($archive_raw ? vinfo_init_raw_root($droot) : vinfo_init_root($droot)) {
            ABORTED($droot, "Failed to fetch subvolume detail");
            WARN "Skipping archive target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot), @stderr;
            next;
          }
        }
      }
      # archiving onto the same filesystem makes no sense; hard error
      if(_is_same_fs_tree($droot->{node}, $vol->{node})) {
        ERROR "Source and target subvolumes are on the same btrfs filesystem!";
        exit 1;
      }
    }

    # translate archive_exclude globs, add to exclude args
    my $archive_exclude = config_key($config, 'archive_exclude') // [];
    push @exclude_vf, map(vinfo_filter_statement($_), (@$archive_exclude));

    # create archives
    my $schedule_results = [];
    my $aborted;
    foreach my $sroot (vinfo_subsection($config, 'archive_source')) {
      if($aborted) {
        # abort all subsequent sources on any abort (we don't want to go on hammering on "disk full" errors)
        ABORTED($sroot, $aborted);
        next;
      }
      my $snapshot_name = config_key($sroot, "snapshot_name") // die;

      # skip on archive_exclude and --exclude option
      if(vinfo_match(\@exclude_vf, $sroot) ||
         vinfo_match(\@exclude_vf, vinfo_child($sroot, $snapshot_name)))
      {
        ABORTED($sroot, "skip_archive_exclude", "Match on exclude pattern");
        INFO "Skipping archive subvolumes \"$sroot->{PRINT}/${snapshot_name}.*\": " . ABORTED_TEXT($sroot);
        next;
      }

      foreach my $droot (vinfo_subsection($sroot, 'archive_target')) {
        INFO "Archiving subvolumes: $sroot->{PRINT}/${snapshot_name}.*";
        macro_archive_target($sroot, $droot, $snapshot_name, { results => $schedule_results });
        if(IS_ABORTED($droot)) {
          # also abort $sroot
          $aborted = "At least one target aborted earlier";
          ABORTED($sroot, $aborted);
          WARN "Skipping archiving of \"$sroot->{PRINT}/\": " . ABORTED_TEXT($sroot);
          last;
        }
      }
    }

    # delete archives (scheduled cleanup), unless -p/-r was given
    my $del_schedule_results;
    if($preserve_backups) {
      INFO "Preserving all archives (option \"-p\" or \"-r\" present)";
    }
    else
    {
      $del_schedule_results = [];
      foreach my $sroot (vinfo_subsection($config, 'archive_source')) {
        my $snapshot_name = config_key($sroot, "snapshot_name") // die;
        foreach my $droot (vinfo_subsection($sroot, 'archive_target')) {
          INFO "Cleaning archive: $droot->{PRINT}/${snapshot_name}.*";
          macro_delete($droot, "", $snapshot_name, $droot,
                       { preserve     => config_preserve_hash($droot, "archive"),
                         results      => $del_schedule_results,
                         result_hints => { topic => "archive", root_path => $droot->{PATH} },
                       },
                       commit => config_key($droot, "btrfs_commit_delete"),
                       type   => "delete_archive",
                       qgroup => { destroy => config_key($droot, "archive_qgroup_destroy"),
                                   type => "qgroup_destroy_archive" },
                      );
        }
      }
    }


    # finalize: compute exit status, log completion, close transaction log
    my $exit_status = exit_status($config);
    my $time_elapsed = time - $start_time;
    INFO "Completed within: ${time_elapsed}s  (" . localtime(time) . ")";
    action("finished",
           status => $exit_status ? "partial" : "success",
           duration => $time_elapsed,
           message => $exit_status ? "At least one backup task aborted" : undef,
          );
    close_transaction_log();

    unless($quiet)
    {
      # print scheduling results
      if($print_schedule) {
        my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$schedule_results;
        print_formatted("schedule", \@data, title => "ARCHIVE SCHEDULE", paragraph => 1);
      }

      if($print_schedule && $del_schedule_results) {
        my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$del_schedule_results;
        print_formatted("schedule", \@data, title => "DELETE SCHEDULE", paragraph => 1);
      }

      # print summary
      $output_format ||= "custom";
      if($output_format eq "custom")
      {
        # human-readable summary: one paragraph per source/target pair,
        # prefixed per-subvolume with a create-mode tag (see legend below)
        my @out;
        foreach my $sroot (vinfo_subsection($config, 'archive_source', 1)) {
          foreach my $droot (vinfo_subsection($sroot, 'archive_target', 1)) {
            my @subvol_out;
            if($droot->{SUBDIR_CREATED}) {
              push @subvol_out, "++. $droot->{PRINT}/";
            }
            foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
              my $create_mode = "***";
              $create_mode = ">>>" if($_->{parent});
              $create_mode = "!!!" if($_->{ERROR});
              push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
            }
            foreach(@{$droot->{SUBVOL_DELETED} // []}) {
              push @subvol_out, "--- $_->{PRINT}";
            }
            if(IS_ABORTED($droot, "abort_") || IS_ABORTED($sroot, "abort_")) {
              push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . (ABORTED_TEXT($droot) || ABORTED_TEXT($sroot));
            }
            elsif(IS_ABORTED($sroot, "skip_archive_exclude")) {
              push @subvol_out, "<archive_exclude>";
            }
            unless(@subvol_out) {
              push @subvol_out, "[-] $droot->{PRINT}/$sroot->{CONFIG}->{snapshot_name}.*";
            }
            push @out, "$sroot->{PRINT}/$sroot->{CONFIG}->{snapshot_name}.*", @subvol_out, "";
          }
        }

        my @cmdline_options = map { "exclude: $_" } @exclude_cmdline;
        push @cmdline_options, "preserve: Preserved all archives" if($preserve_backups);

        print_header(title => "Archive Summary",
                     time => $start_time,
                     options => \@cmdline_options,
                     legend => [
                       "++.  created directory",
                       "---  deleted subvolume",
                       "***  received subvolume (non-incremental)",
                       ">>>  received subvolume (incremental)",
                       "[-]  no action",
                      ],
                    );
        print join("\n", @out);
        print_footer($config, $exit_status);
      }
      else
      {
        # print action log (without transaction start messages)
        my @data = grep { $_->{status} !~ /starting$/ } @transaction_log;
        print_formatted("transaction", \@data, title => "TRANSACTION LOG");
      }
    }

    exit $exit_status;
  }


  #
  # expand subvolume globs (wildcards)
  #
  # Replaces each subvolume subsection containing a glob (GLOB_CONTEXT)
  # with one concrete subsection per matching non-readonly subvolume.
  foreach my $config_vol (@{$config->{SUBSECTION}}) {
    die unless($config_vol->{CONTEXT} eq "volume");

    # read-in subvolume list (and expand globs) only if needed
    next unless(grep defined($_->{GLOB_CONTEXT}), @{$config_vol->{SUBSECTION}});
    my $sroot_glob; # read-in late
    my @vol_subsection_expanded;
    foreach my $config_subvol (@{$config_vol->{SUBSECTION}}) {
      die unless($config_subvol->{CONTEXT} eq "subvolume");
      if($config_subvol->{GLOB_CONTEXT}) {
        my $globs = $config_subvol->{rel_path};

        # read-in subvolume list once, only if needed
        unless(defined($sroot_glob)) {
          $sroot_glob = vinfo($config_vol->{url}, $config_vol);
          unless(vinfo_init_root($sroot_glob)) {
            ABORTED($sroot_glob, "Failed to fetch subvolume detail");
            WARN "Skipping subvolume (wildcards) \"$sroot_glob->{PRINT}/$globs\": " . ABORTED_TEXT($sroot_glob), @stderr;
            last;
          }
        }

        INFO "Expanding wildcards: $sroot_glob->{PRINT}/$globs";

        # support "*some*file*", "*/*"
        # each "*" run matches any sequence of non-slash characters
        my $match = join('[^\/]*', map(quotemeta($_), split(/\*+/, $globs, -1)));
        TRACE "translated globs \"$globs\" to regex \"$match\"" if($do_trace);
        my $expand_count = 0;
        foreach my $vol (@{vinfo_subvol_list($sroot_glob, sort => 'path')})
        {
          if($vol->{node}{readonly}) {
            TRACE "skipping readonly subvolume: $vol->{PRINT}" if($do_trace);
            next;
          }
          unless($vol->{SUBVOL_PATH} =~ /^$match$/) {
            TRACE "skipping non-matching subvolume: $vol->{PRINT}" if($do_trace);
            next;
          }
          unless(defined(check_file($vol->{SUBVOL_PATH}, { relative => 1 }))) {
            WARN "Ambiguous subvolume path \"$vol->{SUBVOL_PATH}\" while expanding \"$globs\", ignoring";
            next;
          }
          INFO "Found source subvolume: $vol->{PRINT}";
          # clone the glob subsection for this concrete subvolume
          my %conf = ( %$config_subvol,
                       rel_path_glob => $globs,
                       rel_path      => $vol->{SUBVOL_PATH},
                       url           => $vol->{URL},
                       snapshot_name => $vol->{NAME},  # snapshot_name defaults to subvolume name
                      );
          # deep copy of target subsection
          my @subsection_copy = map { { %$_, PARENT => \%conf }; } @{$config_subvol->{SUBSECTION}};
          $conf{SUBSECTION} = \@subsection_copy;
          push @vol_subsection_expanded, \%conf;
          $expand_count += 1;
        }
        unless($expand_count) {
          WARN "No subvolumes found matching: $sroot_glob->{PRINT}/$globs";
        }
      }
      else {
        # non-glob subsections are kept as-is
        push @vol_subsection_expanded, $config_subvol;
      }
    }
    $config_vol->{SUBSECTION} = \@vol_subsection_expanded;
  }
  TRACE(Data::Dumper->Dump([$config], ["config"])) if($do_trace && $do_dumper);


  #
  # create vinfo nodes (no readin yet)
  #
  # Walk the volume/subvolume/target config tree and attach a vinfo
  # object to each section (filesystem detail is fetched later).
  foreach my $config_vol (@{$config->{SUBSECTION}}) {
    die unless($config_vol->{CONTEXT} eq "volume");
    my $sroot = vinfo($config_vol->{url}, $config_vol);
    vinfo_assign_config($sroot);
    foreach my $config_subvol (@{$config_vol->{SUBSECTION}}) {
      die unless($config_subvol->{CONTEXT} eq "subvolume");
      my $svol = vinfo_child($sroot, $config_subvol->{rel_path}, $config_subvol);
      # TODO: add config option "snapshot_path", reuse snaproot with same URL
      # snapshot root: volume url plus optional "/"-prefixed snapshot_dir
      my $snapshot_dir = config_key($svol, "snapshot_dir", prefix => '/') // "";
      my $snaproot = vinfo($config_vol->{url} . $snapshot_dir, $config_subvol);
      vinfo_assign_config($svol, $snaproot);
      foreach my $config_target (@{$config_subvol->{SUBSECTION}}) {
        die unless($config_target->{CONTEXT} eq "target");
        my $droot = vinfo($config_target->{url}, $config_target);
        vinfo_assign_config($droot);
      }
    }
  }


  #
  # filter subvolumes matching command line arguments, handle noauto option
  #
  if(scalar @filter_vf)
  {
    # Abort (skip) every section not matched by a filter argument.  A
    # match on a volume keeps the whole volume; a match on a subvolume
    # (or its snapshot location) keeps that subvolume and its targets.
    foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
      my $found_vol = 0;
      if(vinfo_match(\@filter_vf, $sroot, flag_matched => '_matched')) {
        next;
      }
      foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
        my $found_subvol = 0;
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_name = config_key($svol, "snapshot_name") // die;
        if(vinfo_match(\@filter_vf, $svol, flag_matched => '_matched') ||
           vinfo_match(\@filter_vf, vinfo_child($snaproot, $snapshot_name), flag_matched => '_matched'))
        {
          $found_vol = 1;
          next;
        }
        foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
          if(vinfo_match(\@filter_vf, $droot, flag_matched => '_matched') ||
             vinfo_match(\@filter_vf, vinfo_child($droot, $snapshot_name), flag_matched => '_matched'))
          {
            # target match keeps the enclosing subvolume and volume
            $found_subvol = 1;
            $found_vol = 1;
          }
          else {
            ABORTED($droot, "skip_cmdline_filter", "No match on filter command line argument");
            DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
          }
        }
        unless($found_subvol) {
          ABORTED($svol, "skip_cmdline_filter", "No match on filter command line argument");
          DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
        }
      }
      unless($found_vol) {
        ABORTED($sroot, "skip_cmdline_filter", "No match on filter command line argument");
        DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
      }
    }
    # make sure all args have a match
    my @nomatch = map { $_->{_matched} ? () : $_->{unparsed} } @filter_vf;
    if(@nomatch) {
      foreach(@nomatch) {
        ERROR "Filter argument \"$_\" does not match any volume, subvolume, target or group declaration";
      }
      exit 2;
    }
    $config->{CMDLINE_FILTER_LIST} = [ map { $_->{unparsed} } @filter_vf ];
  }
  elsif(not $action_config_print)
  {
    # no filter_args present, abort "noauto" contexts
    if(config_key($config, "noauto")) {
      WARN "Option \"noauto\" is set in global context, and no filter argument present, exiting";
      exit 0;
    }
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      if(config_key($sroot, "noauto")) {
        ABORTED($sroot, "skip_noauto", 'option "noauto" is set');
        DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
        next;
      }
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        if(config_key($svol, "noauto")) {
          ABORTED($svol, "skip_noauto", 'option "noauto" is set');
          DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
          next;
        }
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          if(config_key($droot, "noauto")) {
            ABORTED($droot, "skip_noauto", 'option "noauto" is set');
            DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
          }
        }
      }
    }
  }

  if(scalar @exclude_vf)
  {
    # handle --exclude command line option
    # Abort matching volumes/subvolumes/targets; a volume whose
    # subvolumes are all excluded is aborted as a whole.
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      if(my $ff = vinfo_match(\@exclude_vf, $sroot)) {
        ABORTED($sroot, "skip_cmdline_exclude", "command line argument \"--exclude=$ff->{unparsed}\"");
        DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
        next;
      }
      my $all_svol_aborted = 1;
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_name = config_key($svol, "snapshot_name") // die;
        # a subvolume is excluded if either the subvolume itself or its
        # snapshot location matches
        if(my $ff = (vinfo_match(\@exclude_vf, $svol) ||
                     vinfo_match(\@exclude_vf, vinfo_child($snaproot, $snapshot_name))))
        {
          ABORTED($svol, "skip_cmdline_exclude", "command line argument \"--exclude=$ff->{unparsed}\"");
          DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
          next;
        }
        $all_svol_aborted = 0;
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          if(my $ff = (vinfo_match(\@exclude_vf, $droot) ||
                       vinfo_match(\@exclude_vf, vinfo_child($droot, $snapshot_name))))
          {
            ABORTED($droot, "skip_cmdline_exclude", "command line argument \"--exclude=$ff->{unparsed}\"");
            DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
            next;
          }
        }
      }
      if($all_svol_aborted) {
        ABORTED($sroot, "skip_cmdline_exclude", "All subvolumes excluded");
        DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
      }
    }
  }

  if($action_usage)
  {
    #
    # print filesystem information
    #
    # Collect filesystem usage per unique URL: sources first, then
    # targets (deduplicated via %processed).
    my @data;
    my %processed;
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      unless($processed{$sroot->{URL}}) {
        my $usage = btrfs_filesystem_usage($sroot) // {};
        push @data, { %$usage,
                      type => "source",
                      vinfo_prefixed_keys("", $sroot),
                    };
        $processed{$sroot->{URL}} = 1;
      }
    }

    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          unless($processed{$droot->{URL}}) {
            my $usage = btrfs_filesystem_usage($droot) // {};
            push @data, { %$usage,
                          type => "target",
                          vinfo_prefixed_keys("", $droot),
                        };
            $processed{$droot->{URL}} = 1;
          }
        }
      }
    }
    print_formatted("usage", \@data);
    exit exit_status($config);
  }


  if($action_config_print)
  {
    # "print-all" additionally resolves inherited/derived option values
    my $resolve = ($action_config_print eq "print-all");
    #
    # print configuration lines, machine readable
    #
    my @out;
    push @out, config_dump_keys($config, resolve => $resolve);
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      push @out, "\nvolume $sroot->{URL}";
      push @out, config_dump_keys($sroot, prefix => "\t", resolve => $resolve);
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        push @out, ""; # newline
        # keep the original glob as a comment for expanded subvolumes
        push @out, "\t# subvolume $svol->{CONFIG}->{rel_path_glob}" if(defined($svol->{CONFIG}->{rel_path_glob}));
        push @out, "\tsubvolume $svol->{SUBVOL_PATH}";
        push @out, config_dump_keys($svol, prefix => "\t\t", resolve => $resolve);
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          push @out, "\n\t\ttarget $droot->{CONFIG}->{target_type} $droot->{URL}";
          push @out, config_dump_keys($droot, prefix => "\t\t\t", resolve => $resolve);
        }
      }
    }

    print_header(title => "Configuration Dump",
                 config => $config,
                 options => [ map { "exclude: $_" } @exclude_cmdline ],
                 time => $start_time,
                );

    print join("\n", @out) . "\n";
    exit exit_status($config);
  }


  if($action_list)
  {
    # Row accumulators for the four list flavors; each level inherits
    # the keys of its parent row (volume -> subvolume -> target).
    my @vol_data;
    my @subvol_data;
    my @target_data;
    my @mixed_data;
    my %target_uniq;

    #
    # print configuration lines, machine readable
    #
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      my $volh = { vinfo_prefixed_keys("volume", $sroot) };
      push @vol_data, $volh;

      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $subvolh = { %$volh,
                        vinfo_prefixed_keys("source", $svol),
                        snapshot_path     => $snaproot->{PATH},
                        snapshot_name     => config_key($svol, "snapshot_name"),
                        snapshot_preserve => format_preserve_matrix(config_preserve_hash($svol, "snapshot")),
                      };
        push @subvol_data, $subvolh;

        my $found = 0;
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          my $targeth = { %$subvolh,
                          vinfo_prefixed_keys("target", $droot),
                          target_preserve => format_preserve_matrix(config_preserve_hash($droot, "target")),
                          target_type => $droot->{CONFIG}{target_type},   # "send-receive" or "raw"
                        };
          if($action_list eq "target") {
            # "list target" deduplicates targets by URL
            next if($target_uniq{$droot->{URL}});
            $target_uniq{$droot->{URL}} = 1;
          }
          push @target_data, $targeth;
          push @mixed_data, $targeth;
          $found = 1;
        }
        # make sure the subvol is always printed (even if no targets around)
        push @mixed_data, $subvolh unless($found);
      }
    }
    # dispatch on list flavor
    if($action_list eq "volume") {
      print_formatted("config_volume", \@vol_data);
    }
    elsif($action_list eq "source") {
      print_formatted("config_source", \@subvol_data);
    }
    elsif($action_list eq "target") {
      print_formatted("config_target", \@target_data);
    }
    elsif($action_list eq "config") {
      print_formatted("config", \@mixed_data);
    }
    else {
      die "unknown action_list=$action_list";
    }
    exit exit_status($config);
  }


  #
  # fill vinfo hash, basic checks on configuration
  #

  # read volume btrfs tree, and make sure subvolume exist
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    DEBUG "Initializing volume section: $sroot->{PRINT}";
    unless(vinfo_init_root($sroot)) {
      ABORTED($sroot, "Failed to fetch subvolume detail");
      WARN "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot), @stderr;
      next;
    }
    unless(scalar(vinfo_subsection($sroot, 'subvolume'))) {
      WARN "No subvolume configured for \"volume $sroot->{URL}\"";
    }
    # Sanity checks per configured source subvolume; each failed check
    # aborts only that subvolume section.
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      DEBUG "Initializing subvolume section: $svol->{PRINT}";
      unless(vinfo_init_root($svol)) {
        ABORTED($svol, "Failed to fetch subvolume detail");
        WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol), @stderr;
        next;
      }
      if((not $svol->{node}{uuid}) || ($svol->{node}{uuid} eq '-')) {
        ABORTED($svol, "subvolume has no UUID");
        ERROR "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
        next;
      }
      # sources must be writable (snapshots are taken from them)
      if($svol->{node}{readonly}) {
        ABORTED($svol, "subvolume is readonly");
        WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
        next;
      }
      # a set "Received UUID" indicates a receive target, not a source
      if($svol->{node}{received_uuid} ne '-') {
        ABORTED($svol, "\"Received UUID\" is set");
        WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
        next;
      }
      # the subvolume must live in the btrfs tree of its volume section
      if(_is_child_of($sroot->{node}, $svol->{node}{uuid}) ||
         ($svol->{node}{uuid} eq $sroot->{node}{uuid}))
      {
        DEBUG "Found \"$svol->{PRINT}\" (id=$svol->{node}{id}) in btrfs tree of: $sroot->{PRINT}";
      } else {
        ABORTED($svol, "Not a child subvolume of: $sroot->{PRINT}");
        WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
        next;
      }

      # the snapshot directory must resolve and be on the same filesystem
      my $snaproot = vinfo_snapshot_root($svol);
      unless(vinfo_init_root($snaproot)) {
        ABORTED($svol, "Failed to fetch subvolume detail for snapshot_dir");
        WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol), @stderr;
        next;
      }
      unless(_is_same_fs_tree($snaproot->{node}, $svol->{node})) {
        ABORTED($svol, "Snapshot path is not on same filesystem");
        WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
        next;
      }
    }
  }

  # read target btrfs tree
  if($action_run && $skip_backups && $preserve_snapshots && $preserve_backups) {
    # if running "btrbk snapshot --preserve", there is no need to
    # initialize targets, and we don't want to fail on missing targets.
    DEBUG "Skipping target tree readin (preserving all snapshots and backups)";
  }
  else {
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          DEBUG "Initializing target section: $droot->{PRINT}";
          my $target_type = $droot->{CONFIG}->{target_type} || die;
          # initialization differs by target type: btrfs tree readin for
          # send-receive targets, raw metadata readin for raw targets
          if($target_type eq "send-receive")
          {
            unless(vinfo_init_root($droot)) {
              ABORTED($droot, "Failed to fetch subvolume detail");
              WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot), @stderr;
              next;
            }
          }
          elsif($target_type eq "raw")
          {
            unless(vinfo_init_raw_root($droot)) {
              ABORTED($droot, "Failed to fetch raw target metadata");
              WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot), @stderr;
              next;
            }
          }

          # FAILSAFE_PRESERVE override aborts all targets unconditionally
          if($config_override{FAILSAFE_PRESERVE}) {
            ABORTED($droot, $config_override{FAILSAFE_PRESERVE});
            WARN  "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
          }
        }
      }
    }
  }

  # Check for duplicate snapshot / target locations: two configured
  # subvolumes resolving to the same "<dir>/<snapshot_name>.*" pattern
  # would interfere with each other's snapshots or backups; bail out early.
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      my $snapshot_basename = config_key($svol, "snapshot_name") // die;

      # check for duplicate snapshot locations
      if(config_key($svol, "snapshot_create")) {
        my $snaproot = vinfo_snapshot_root($svol);
        # Uniqueness is keyed on the node hash (filesystem object) plus the
        # relative subdir path, so different access paths to the same tree
        # still collide. The check state is stored on the node itself.
        my $snaproot_subdir_path = (defined($snaproot->{NODE_SUBDIR}) ? $snaproot->{NODE_SUBDIR} . '/' : "") . $snapshot_basename;
        if(my $prev = $snaproot->{node}->{_SNAPSHOT_CHECK}->{$snaproot_subdir_path}) {
          ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same snapshot: $snaproot->{PRINT}/${snapshot_basename}.*";
          ERROR "Please fix \"snapshot_name\" configuration options!";
          exit 1;
        }
        $snaproot->{node}->{_SNAPSHOT_CHECK}->{$snaproot_subdir_path} = $svol->{PRINT};
      }

      # check for duplicate target locations (same scheme as above, with a
      # separate per-node registry)
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        my $droot_subdir_path = (defined($droot->{NODE_SUBDIR}) ? $droot->{NODE_SUBDIR} . '/' : "") . $snapshot_basename;
        if(my $prev = $droot->{node}->{_BACKUP_CHECK}->{$droot_subdir_path}) {
          ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same backup target: $droot->{PRINT}/${snapshot_basename}.*";
          ERROR "Please fix \"snapshot_name\" or \"target\" configuration options!";
          exit 1;
        }
        $droot->{node}->{_BACKUP_CHECK}->{$droot_subdir_path} = $svol->{PRINT};
      }
    }
  }


  if($action_origin)
  {
    #
    # print origin information: parent / received-from ancestry tree of the
    # subvolume given on the command line
    #
    my $vol = $subvol_args[0] || die;  # argument presence validated earlier (assertion)
    my $lines = [];
    _origin_tree("", $vol->{node}, $lines);

    $output_format ||= "custom";
    if($output_format eq "custom") {
      print_header(title  => "Origin Tree",
                   config => $config,
                   time   => $start_time,
                   legend => [
                     "^--     : parent subvolume",
                     "newline : received-from relationship with subvolume (identical content)",
                    ]
                  );
      print join("\n", map { $_->{tree} } @$lines) . "\n";
    }
    else {
      # machine-readable output via generic formatter
      print_formatted('origin_tree', $lines );
    }
    exit 0;
  }


  if($action_resolve)
  {
    #
    # "list" actions: correlate source snapshots with their backups on all
    # targets, then print rows according to $action_resolve
    # ("snapshots" | "backups" | "all" | "latest" | "stats").
    #
    my @data;
    my %stats = ( snapshots => 0, backups => 0, correlated => 0, incomplete => 0, orphaned => 0 );
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_name = config_key($svol, "snapshot_name") // die;
        my @related_snapshots = get_related_snapshots($snaproot, $svol, $snapshot_name);

        # row keys common to every row of this source subvolume
        my %svol_data = (
          vinfo_prefixed_keys("source", $svol),
          snapshot_name => $snapshot_name,
         );
        # one row per related snapshot; "up-to-date" if the snapshot was
        # created at the source subvolume's current generation
        my @sdata = map +{
          %svol_data,
          type => "snapshot",
          status => ($_->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : "",
          vinfo_prefixed_keys("snapshot", $_),
          _vinfo => $_,
          _btrbk_date => $_->{node}{BTRBK_DATE},
        }, @related_snapshots;

        # per-subvolume aggregate row (used by "stats")
        my %svol_stats_data = (
          %svol_data,
          snapshot_subvolume => "$snaproot->{PATH}/$snapshot_name.*",
          snapshot_status => (grep { $_->{status} eq "up-to-date" } @sdata) ? "up-to-date" : "",
          snapshots => scalar(@sdata),
         );
        $stats{snapshots} += scalar(@sdata);

        my (@bdata, @ldata, @stdata);
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          my %dstats = ( backups => 0, correlated => 0, orphaned => 0, incomplete => 0, uptodate => 0 );
          my $latest_backup;
          foreach my $target_vol (@{vinfo_subvol_list($droot, btrbk_direct_leaf => $snapshot_name, sort => 'path')}) {
            my $target_data = {
              %svol_data,
              type => "backup",
              target_type => $target_vol->{CONFIG}{target_type},   # "send-receive" or "raw"
              vinfo_prefixed_keys("target", $target_vol),
              _btrbk_date => $target_vol->{node}{BTRBK_DATE},
            };

            # incomplete received (garbled) subvolumes have no received_uuid (as of btrfs-progs v4.3.1).
            # a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
            if($target_vol->{node}{received_uuid} eq '-') {
              $dstats{incomplete}++;
              $target_data->{status} = "incomplete";
              push @bdata, $target_data;
              next;
            }

            # Merge the matching snapshot row into the backup row when the
            # backup was received from that snapshot; mark both sides as
            # correlated. Snapshot keys first, so backup keys win on clash.
            foreach (@sdata) {
              if(_is_correlated($_->{_vinfo}{node}, $target_vol->{node})) {
                $target_data = {
                  %$_,
                  %$target_data,
                  type => "snapshot,backup",
                  _correlated => 1,
                };
                $_->{_correlated} = 1;
                last;
              }
            }
            push @bdata, $target_data;
            # track most recent backup (by btrbk timestamp) on this target
            $latest_backup = $target_data if(!defined($latest_backup) || (cmp_date($latest_backup->{_btrbk_date}, $target_data->{_btrbk_date}) < 0));

            $dstats{uptodate} ||= ($target_data->{status} // "") eq "up-to-date";
            $dstats{backups}++;
            if($target_data->{_correlated}) { $dstats{correlated}++; } else { $dstats{orphaned}++; }
          }
          # NOTE(review): $latest_backup may be undef here (target with no
          # matching backups); the undef is pushed as-is.
          push @ldata, $latest_backup;

          push @stdata, {
            %svol_stats_data,
            %dstats,
            vinfo_prefixed_keys("target", $droot),
            target_subvolume => "$droot->{PATH}/$snapshot_name.*",
            backup_status => $dstats{uptodate} ? "up-to-date" : "",
          };
          $stats{$_} += $dstats{$_} foreach(qw(backups correlated incomplete orphaned));
        }

        # assemble output rows for the requested list type
        if($action_resolve eq "snapshots") {
          push @data, @sdata;
        } elsif($action_resolve eq "backups") {
          push @data, @bdata;
        } elsif($action_resolve eq "all") {
          # all backups, plus snapshots with no correlated backup, by date
          push @data, sort { cmp_date($a->{_btrbk_date}, $b->{_btrbk_date}) } (@bdata, grep { !$_->{_correlated} } @sdata);
        } elsif($action_resolve eq "latest") {
          # newest row overall, plus newest backup per target (@ldata)
          my $latest_snapshot = (sort { cmp_date($b->{_btrbk_date}, $a->{_btrbk_date}) } (@sdata, @bdata))[0];
          push @data, @ldata;
          push @data, $latest_snapshot if($latest_snapshot && !$latest_snapshot->{_correlated});
        } elsif($action_resolve eq "stats") {
          # make sure subvolumes without targets still produce one row
          @stdata = ( \%svol_stats_data ) unless(@stdata);
          push @data, @stdata;
        }
      }
    }

    # print collected rows in the format matching the list type
    if($action_resolve eq "stats") {
      my $filter = $config->{CMDLINE_FILTER_LIST} ? " (" . join(", ", @{$config->{CMDLINE_FILTER_LIST}}) . ")" : "";
      my @backup_total = map { $stats{$_} ? "$stats{$_} $_" : () } qw( correlated incomplete );
      my $bflags = @backup_total ? "(" . join(', ', @backup_total) . ")" : undef;

      print_formatted("stats", \@data, paragraph => 1);
      print "Total${filter}:\n";
      print_formatted({ table => [ qw( a b -c ) ], RALIGN => { a=>1 } },
                      [ { a => $stats{snapshots}, b => "snapshots" },
                        { a => $stats{backups},   b => "backups", c => $bflags } ],
                      output_format => "table", no_header => 1, empty_cell_char => "");
    }
    elsif($action_resolve eq "snapshots") {
      print_formatted("snapshots", \@data);
    }
    elsif($action_resolve eq "backups") {
      print_formatted("backups", \@data);
    }
    elsif($action_resolve eq "latest") {
      print_formatted("latest", \@data);
    }
    else {
      print_formatted("resolved", \@data);
    }

    exit exit_status($config);
  }


  if($action_clean)
  {
    #
    # "btrbk clean": identify and delete incomplete backups on all targets
    #
    init_transaction_log(config_key($config, "transaction_log"),
                         config_key($config, "transaction_syslog"));

    my @out;  # summary lines for the "custom" output format below
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snapshot_name = config_key($svol, "snapshot_name") // die;
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          INFO "Cleaning incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
          push @out, "$droot->{PRINT}/$snapshot_name.*";
          my @delete;
          foreach my $target_vol (@{vinfo_subvol_list($droot, btrbk_direct_leaf => $snapshot_name, sort => 'path')}) {
            # incomplete received (garbled) subvolumes are not readonly and have no received_uuid (as of btrfs-progs v4.3.1).
            # a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
            if($target_vol->{node}{received_uuid} eq '-') {
              DEBUG "Found incomplete target subvolume: $target_vol->{PRINT}";
              push(@delete, $target_vol);
            }
          }

          # delete the whole batch; returns the successfully deleted subset
          my @delete_success = btrfs_subvolume_delete(\@delete, commit => config_key($droot, "btrfs_commit_delete"), type => "delete_garbled");
          INFO "Deleted " . scalar(@delete_success) . " incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
          $droot->{SUBVOL_DELETED} //= [];
          push @{$droot->{SUBVOL_DELETED}}, @delete_success;
          push @out, map("--- $_->{PRINT}", @delete_success);

          # any failed deletion aborts this target (partial run)
          if(scalar(@delete_success) != scalar(@delete)) {
            ABORTED($droot, "Failed to delete incomplete target subvolume");
            push @out, "!!! Target \"$droot->{PRINT}\" aborted: " . ABORTED_TEXT($droot);
          }
          push(@out, "<no_action>") unless(scalar(@delete));
          push(@out, "");
        }
      }
    }

    # finalize: exit status reflects aborted sections, log completion
    my $exit_status = exit_status($config);
    my $time_elapsed = time - $start_time;
    INFO "Completed within: ${time_elapsed}s  (" . localtime(time) . ")";
    action("finished",
           status => $exit_status ? "partial" : "success",
           duration => $time_elapsed,
           message => $exit_status ? "At least one delete operation failed" : undef,
          );
    close_transaction_log();

    #
    # print summary
    #
    unless($quiet)
    {
      $output_format ||= "custom";
      if($output_format eq "custom")
      {
        print_header(title  => "Cleanup Summary",
                     config => $config,
                     time   => $start_time,
                     legend => [
                       "---  deleted subvolume (incomplete backup)",
                      ],
                    );
        print join("\n", @out);
        print_footer($config, $exit_status);
      }
      else
      {
        # print action log (without transaction start messages)
        my @data = grep { $_->{status} !~ /starting$/ } @transaction_log;
        print_formatted("transaction", \@data, title => "TRANSACTION LOG");
      }
    }

    exit $exit_status;
  }


  if($action_run)
  {
    # "btrbk run/snapshot/resume": snapshot creation phase (backup creation
    # and retention cleanup follow below in the same if-block).
    init_transaction_log(config_key($config, "transaction_log"),
                         config_key($config, "transaction_syslog"));

    if($skip_snapshots) {
      INFO "Skipping snapshot creation (btrbk resume)";
    }
    else
    {
      #
      # create snapshots
      #
      foreach my $sroot (vinfo_subsection($config, 'volume')) {
        foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
          my $snaproot = vinfo_snapshot_root($svol);
          my $snapshot_basename = config_key($svol, "snapshot_name") // die;
          DEBUG "Evaluating snapshot creation for: $svol->{PRINT}";

          # check if we need to create a snapshot, honoring the
          # snapshot_create policy: no | always | onchange | ondemand
          my $snapshot_create = config_key($svol, "snapshot_create");
          if(not $snapshot_create) {
            DEBUG "Snapshot creation disabled (snapshot_create=no)";
            next;
          }
          elsif($snapshot_create eq "always") {
            DEBUG "Snapshot creation enabled (snapshot_create=always)";
          }
          elsif($snapshot_create eq "onchange") {
            # check if latest (btrbk only!) snapshot is up-to-date with source subvolume (by generation)
            my $latest = get_latest_related_snapshot($snaproot, $svol, $snapshot_basename);
            if($latest) {
              if($latest->{node}{cgen} == $svol->{node}{gen}) {
                INFO "Snapshot creation skipped: snapshot_create=onchange, snapshot is up-to-date: $latest->{PRINT}";
                $svol->{SNAPSHOT_UP_TO_DATE} = $latest;
                next;
              }
              DEBUG "Snapshot creation enabled: snapshot_create=onchange, gen=$svol->{node}{gen} > snapshot_cgen=$latest->{node}{cgen}";
            }
            else {
              DEBUG "Snapshot creation enabled: snapshot_create=onchange, no snapshots found";
            }
          }
          elsif($snapshot_create eq "ondemand") {
            # check if at least one target is present
            if(scalar vinfo_subsection($svol, 'target')) {
              DEBUG "Snapshot creation enabled (snapshot_create=ondemand): at least one target is present";
            }
            else {
              INFO "Snapshot creation skipped: snapshot_create=ondemand, and no target is present for: $svol->{PRINT}";
              next;
            }
          }
          else {
            # config parser should reject other values; internal assertion
            die "illegal value for snapshot_create configuration option: $snapshot_create";
          }

          # Find a unique snapshot name: collect existing names matching
          # "<basename>.<timestamp>[_N]" on the snapshot root and on all
          # targets, then use the next free "_N" postfix (none for first).
          my $timestamp = timestamp(\@tm_now, config_key($svol, "timestamp_format"));
          my @unconfirmed_target_name;
          my @lookup = map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($snaproot)};
          foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
            if(IS_ABORTED($droot)) {
              # aborted targets cannot be checked for name collisions;
              # remember them for the INFO message below
              push(@unconfirmed_target_name, $droot);
              next;
            }
            push(@lookup, map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($droot)});
          }
          @lookup = grep /^\Q$snapshot_basename.$timestamp\E(_[0-9]+)?$/ ,@lookup;
          TRACE "Present snapshot names for \"$svol->{PRINT}\": " . join(', ', @lookup) if($do_trace);
          @lookup = map { /_([0-9]+)$/ ? $1 : 0 } @lookup;  # extract postfix counters (0 = no postfix)
          @lookup = sort { $b <=> $a } @lookup;             # numeric descending: highest counter first
          my $postfix_counter = $lookup[0] // -1;
          $postfix_counter++;
          my $snapshot_name = $snapshot_basename . '.' . $timestamp . ($postfix_counter ? "_$postfix_counter" : "");

          if(@unconfirmed_target_name) {
            INFO "Assuming non-present subvolume \"$snapshot_name\" in skipped targets: " . join(", ", map { "\"$_->{PRINT}\"" } @unconfirmed_target_name);
          }

          # finally create the snapshot
          INFO "Creating subvolume snapshot for: $svol->{PRINT}";
          my $snapshot = vinfo_child($snaproot, "$snapshot_name");
          if(btrfs_subvolume_snapshot($svol, $snapshot))
          {
            # register the new snapshot in the in-memory tree, and force
            # preserve so the cleanup phase never deletes it in this run
            vinfo_inject_child($snaproot, $snapshot, {
              parent_uuid    => $svol->{node}{uuid},
              received_uuid  => '-',
              readonly       => 1,
              FORCE_PRESERVE => 'preserve forced: created just now',
            });
            $svol->{SNAPSHOT_CREATED} = $snapshot;
          }
          else {
            ABORTED($svol, "Failed to create snapshot: $svol->{PRINT} -> $snapshot->{PRINT}");
            WARN "Skipping subvolume section: " . ABORTED_TEXT($svol);
          }
        }
      }
    }

    #
    # create backups: send/receive every related snapshot that has no
    # correlated subvolume on the target (and would be preserved there)
    #
    if($skip_backups) {
      INFO "Skipping backup creation (btrbk snapshot)";
    }
    else {
      foreach my $sroot (vinfo_subsection($config, 'volume')) {
        foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
          my $snaproot = vinfo_snapshot_root($svol);
          my $snapshot_basename = config_key($svol, "snapshot_name") // die;
          # all btrbk snapshots of this subvolume, sorted by date ascending
          my @related_snapshots = sort({ cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) }
                                       get_related_snapshots($snaproot, $svol, $snapshot_basename));
          foreach my $droot (vinfo_subsection($svol, 'target')) {
            INFO "Checking for missing backups of subvolume \"$svol->{PRINT}\" in \"$droot->{PRINT}/\"";
            my @schedule;
            my $resume_total = 0;
            my $resume_success = 0;

            # snapshots with no correlated target subvolume are candidates
            foreach my $snapshot (@related_snapshots)
            {
              if(get_receive_targets($droot, $snapshot, exact => 1, warn => 1)){
                DEBUG "Found correlated target of: $snapshot->{PRINT}";
                next;
              }

              DEBUG "Adding backup candidate: $snapshot->{PRINT}";
              push(@schedule, { value      => $snapshot,
                                btrbk_date => $snapshot->{node}{BTRBK_DATE},
                                # not enforcing resuming of latest snapshot anymore (since v0.23.0)
                                # preserve   => $snapshot->{node}{FORCE_PRESERVE},
                              });
            }

            if(scalar @schedule)
            {
              DEBUG "Checking schedule for backup candidates";
              # add all present backups as informative_only: these are needed for correct results of schedule()
              foreach my $vol (@{vinfo_subvol_list($droot, btrbk_direct_leaf => $snapshot_basename)}) {
                push(@schedule, { informative_only => 1,
                                  value      => $vol,
                                  btrbk_date => $vol->{node}{BTRBK_DATE},
                                });
              }
              # let the retention scheduler pick which candidates would be
              # preserved on the target; only those get transferred
              my ($preserve, undef) = schedule(
                schedule => \@schedule,
                preserve => config_preserve_hash($droot, "target"),
               );
              my @resume = grep defined, @$preserve;   # remove entries with no value from list (target subvolumes)
              $resume_total = scalar @resume;

              # transfer in creation order (ascending cgen), so potential
              # incremental parents arrive on the target first
              foreach my $snapshot (sort { $a->{node}{cgen} <=> $b->{node}{cgen} } @resume)
              {
                # Continue gracefully (skip instead of abort) on existing (possibly garbled) target
                if(my $err_vol = vinfo_subvol($droot, $snapshot->{NAME})) {
                  my $err_msg = "Please delete stray subvolumes: \"btrbk clean $droot->{PRINT}\"";
                  FIX_MANUALLY($droot, $err_msg);
                  WARN "Target subvolume \"$err_vol->{PRINT}\" exists, but is not a receive target of \"$snapshot->{PRINT}\"";
                  WARN $err_msg;
                  WARN "Skipping backup of: $snapshot->{PRINT}";
                  $droot->{SUBVOL_RECEIVED} //= [];
                  push(@{$droot->{SUBVOL_RECEIVED}}, { ERROR => 1, received_subvolume => $err_vol });
                  next;
                }

                # pick best incremental parent (and clone sources) for send
                my ($clone_src, $clone_src_extra, $target_parent_node);
                my $parent = get_best_parent($snapshot, $snaproot, $droot,
                                             strict_related     => ((config_key($droot, "incremental") // "") eq "strict"),
                                             clone_src          => \$clone_src,
                                             clone_src_extra    => \$clone_src_extra,
                                             target_parent_node => \$target_parent_node);
                if(macro_send_receive(source => $snapshot,
                                      target => $droot,
                                      parent => $parent,  # this is <undef> if no suitable parent found
                                      clone_src          => $clone_src,
                                      clone_src_extra    => $clone_src_extra,
                                      target_parent_node => $target_parent_node,
                                     ))
                {
                  $resume_success++;
                }
                else {
                  # note: ABORTED flag is already set by macro_send_receive()
                  ERROR("Error while resuming backups, aborting");
                  last;
                }
              }
            }

            if($resume_total) {
              INFO "Created $resume_success/$resume_total missing backups";
            } else {
              INFO "No missing backups found";
            }
          }
        }
      }
    }


    #
    # remove backups following a preserve daily/weekly/monthly scheme
    #
    my $schedule_results;
    if($preserve_snapshots && $preserve_backups) {
      INFO "Preserving all snapshots and backups";
    }
    else
    {
      $schedule_results = [];
      foreach my $sroot (vinfo_subsection($config, 'volume')) {
        foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
          my $snaproot = vinfo_snapshot_root($svol);
          my $snapshot_basename = config_key($svol, "snapshot_name") // die;
          my $target_aborted = 0;
          my @related_snapshots = sort({ cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) }  # sort descending
                                       get_related_snapshots($snaproot, $svol, $snapshot_basename));

          foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
            if(IS_ABORTED($droot)) {
              if(IS_ABORTED($droot, "skip_cmdline_")) {
                $target_aborted ||= -1;
              } else {
                $target_aborted = 1;
              }
              next;
            }

            # preserve latest common snapshot/backup (for incremental targets)
            if(config_key($droot, "incremental")) {
              foreach my $snapshot (@related_snapshots) {
                my @receive_targets = get_receive_targets($droot, $snapshot, exact => 1);
                if(scalar(@receive_targets)) {
                  DEBUG "Force preserve for latest common snapshot: $snapshot->{PRINT}";
                  $snapshot->{node}{FORCE_PRESERVE} = 'preserve forced: latest common snapshot';
                  foreach(@receive_targets) {
                    DEBUG "Force preserve for latest common target: $_->{PRINT}";
                    $_->{node}{FORCE_PRESERVE} = 'preserve forced: latest common target';
                  }
                  last;
                }
              }
            }

            if($preserve_backups) {
              INFO "Preserving all backups";
            }
            else {
              #
              # delete backups
              #
              INFO "Cleaning backups of subvolume \"$svol->{PRINT}\": $droot->{PRINT}/$snapshot_basename.*";
              unless(macro_delete($droot, "", $snapshot_basename, $droot,
                                  { preserve     => config_preserve_hash($droot, "target"),
                                    results      => $schedule_results,
                                    result_hints => { topic => "backup", root_path => $droot->{PATH} },
                                  },
                                  commit => config_key($droot, "btrfs_commit_delete"),
                                  type   => "delete_target",
                                  qgroup => { destroy => config_key($droot, "target_qgroup_destroy"),
                                              type => "qgroup_destroy_target" },
                                 ))
              {
                $target_aborted = 1;
              }
            }
          }

          #
          # delete snapshots
          #
          if($preserve_snapshots) {
            INFO "Preserving all snapshots";
          }
          elsif($target_aborted) {
            if($target_aborted == -1) {
              INFO "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target is skipped by command line argument";
            } else {
              WARN "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target aborted earlier";
            }
          }
          else {
            INFO "Cleaning snapshots" . ($wipe_snapshots ? " (wipe)" : "") . ": $snaproot->{PRINT}/$snapshot_basename.*";
            macro_delete($snaproot, "", $snapshot_basename, $svol,
                         { preserve     => config_preserve_hash($svol, "snapshot", wipe => $wipe_snapshots),
                           results      => $schedule_results,
                           result_hints => { topic => "snapshot", root_path => $snaproot->{PATH} },
                         },
                         commit => config_key($svol, "btrfs_commit_delete"),
                         type   => "delete_snapshot",
                         qgroup => { destroy => config_key($svol, "snapshot_qgroup_destroy"),
                                     type => "qgroup_destroy_snapshot" },
                        );
          }
        }
      }
    }

    # Finalize run: exit status is non-zero if any section was aborted
    # ("partial" run); log completion and close the transaction log.
    my $exit_status = exit_status($config);
    my $time_elapsed = time - $start_time;
    INFO "Completed within: ${time_elapsed}s  (" . localtime(time) . ")";
    action("finished",
           status => $exit_status ? "partial" : "success",
           duration => $time_elapsed,
           message => $exit_status ? "At least one backup task aborted" : undef,
          );
    close_transaction_log();


    unless($quiet)
    {
      #
      # print scheduling results (--print-schedule), split by topic
      #
      if($print_schedule && $schedule_results) {
        my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$schedule_results;
        my @data_snapshot = grep { $_->{topic} eq "snapshot" } @data;
        my @data_backup   = grep { $_->{topic} eq "backup"   } @data;

        if(scalar(@data_snapshot)) {
          print_formatted("schedule", \@data_snapshot, title => "SNAPSHOT SCHEDULE", paragraph => 1);
        }
        if(scalar(@data_backup)) {
          print_formatted("schedule", \@data_backup, title => "BACKUP SCHEDULE", paragraph => 1);
        }
      }


      #
      # print summary: one paragraph per subvolume, with a prefix marker
      # per action taken (see legend in print_header below)
      #
      $output_format ||= "custom";
      if($output_format eq "custom")
      {
        my @out;
        foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
          foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
            my @subvol_out;
            if($svol->{SNAPSHOT_UP_TO_DATE}) {
              push @subvol_out, "=== $svol->{SNAPSHOT_UP_TO_DATE}->{PRINT}";
            }
            if($svol->{SNAPSHOT_CREATED}) {
              push @subvol_out, "+++ $svol->{SNAPSHOT_CREATED}->{PRINT}";
            }
            foreach(@{$svol->{SUBVOL_DELETED} // []}) {
              push @subvol_out, "--- $_->{PRINT}";
            }
            foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
              foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
                # "***" non-incremental, ">>>" incremental, "!!!" failed
                my $create_mode = "***";
                $create_mode = ">>>" if($_->{parent});
                # substr($create_mode, 0, 1, '%') if($_->{resume});
                $create_mode = "!!!" if($_->{ERROR});
                push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
              }

              foreach(@{$droot->{SUBVOL_DELETED} // []}) {
                push @subvol_out, "--- $_->{PRINT}";
              }

              if(IS_ABORTED($droot, "abort_")) {
                push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . ABORTED_TEXT($droot);
              }
            }

            if(IS_ABORTED($sroot, "abort_")) {
              # repeat volume errors in subvolume context
              push @subvol_out, "!!! Volume \"$sroot->{PRINT}\" aborted: " . ABORTED_TEXT($sroot);
            }
            if(IS_ABORTED($svol, "abort_")) {
              # don't print "<no_action>" on skip_cmdline or skip_noauto
              push @subvol_out, "!!! Aborted: " . ABORTED_TEXT($svol);
            }

            # print "<no_action>" for subvolume, unless aborted by "skip_"
            unless(scalar(@subvol_out) || IS_ABORTED($sroot, "skip_") || IS_ABORTED($svol, "skip_")) {
              @subvol_out = "<no_action>";
            }

            if(@subvol_out) {
              push @out, "$svol->{PRINT}", @subvol_out, "";
            }
          }
        }

        # note which command line options modified this run
        my @cmdline_options = map { "exclude: $_" } @exclude_cmdline;
        push @cmdline_options, "$skip_snapshots: No snapshots created" if($skip_snapshots);
        push @cmdline_options, "$skip_backups: No backups created" if($skip_backups);
        push @cmdline_options, "$preserve_snapshots: Preserved all snapshots" if($preserve_snapshots);
        push @cmdline_options, "$preserve_backups: Preserved all backups" if($preserve_backups);

        print_header(title => "Backup Summary",
                     config => $config,
                     time => $start_time,
                     options => \@cmdline_options,
                     legend => [
                       "===  up-to-date subvolume (source snapshot)",
                       "+++  created subvolume (source snapshot)",
                       "---  deleted subvolume",
                       "***  received subvolume (non-incremental)",
                       ">>>  received subvolume (incremental)",
                      ],
                    );

        print join("\n", @out);
        print_footer($config, $exit_status);
      }
      else
      {
        # print action log (without transaction start messages)
        my @data = grep { $_->{status} !~ /starting$/ } @transaction_log;
        print_formatted("transaction", \@data, title => "TRANSACTION LOG");
      }
    }

    # exit non-zero only on partial failure; on success fall through
    # (to code below this chunk)
    exit $exit_status if($exit_status);
  }
}


1;
