#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
#

#
# Only change PATH if you give full consideration to GNU or other variants
# of common commands having different arguments and output.
#
export PATH=/usr/bin:/usr/sbin
unset LD_LIBRARY_PATH
2N/A
2N/A#
2N/A# backtrace [message]
2N/A#
2N/A# Used to generate a backtrace (newest on top) of functions up to the caller.
2N/A# Intended for use when internal errors are encountered.
2N/A#
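# Example (sketch): a call such as
#
#	backtrace "Unrecoverable internal error"
#
# made four functions deep in ./foo prints something like the following
# to stderr:
#
#	Unrecoverable internal error:
#		do_c[14]:
#		do_b[10]:
#		do_a[6]:
#		./foo[17]:
#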
2N/Afunction backtrace {
2N/A typeset msg="$*"
2N/A typeset -a args
2N/A
2N/A #
2N/A # Use "set -u" followed by an unset variable reference to force
2N/A # a backtrace.
2N/A #
2N/A set -A args -- $(exec 2>&1; set -u; unset bt; $bt)
2N/A if [[ -n "$msg" ]]; then
2N/A print -u2 -- "${msg}:"
2N/A else
2N/A print -u2 "Backtrace:"
2N/A fi
2N/A #
2N/A # The ksh backtrace format is unlike that seen in common debuggers and
2N/A # other languages, so the logic below transforms it. That is, we go
2N/A # from a message like the following (but all on one line). do_{a,b,c}
2N/A # are functions that led to the backtrace and ./foo is the script name.
2N/A #
2N/A # ./foo[17]: do_a[6]: do_b[10]: do_c[14]: backtrace: line 47: bt:
2N/A # parameter not set
2N/A #
2N/A # To:
2N/A # do_c[14]:
2N/A # do_b[10]:
2N/A # do_a[6]:
2N/A # ./foo[17]:
2N/A #
2N/A typeset -i i
2N/A #
2N/A # Skip errors about this function as we are reporting on the path
2N/A # that led to this function, not the function itself. From the example
2N/A # above, we remove the arguments that make up "backtrace: line 47: bt:
2N/A # parameter not set"
2N/A #
2N/A for (( i = ${#args[@]} - 1; i >= 0; i-- )); do
2N/A [[ "${args[i]}" == "${.sh.fun}:" ]] && break
2N/A done
2N/A # Print a backtrace, newest on top
2N/A for (( i-- ; i >= 0; i-- )); do
2N/A print -u2 "\t${args[i]}"
2N/A done
2N/A}
2N/A#
2N/A# Send the error message to the screen and to the logfile.
2N/A#
2N/Afunction error {
2N/A typeset fmt="$1"
2N/A shift
2N/A [[ -z "$fmt" ]] && fail_internal "format argument undefined"
2N/A
2N/A printf -- "${MSG_PREFIX}ERROR: ${fmt}\n" "$@"
2N/A [[ -n $LOGFILE ]] && printf "[$(date)] ERROR: ${fmt}\n" "$@" >&2
2N/A}
2N/A
2N/Afunction fatal {
2N/A typeset fmt="$1"
2N/A shift
2N/A [[ -z $EXIT_CODE ]] \
2N/A && fail_internal 'fatal (%s) called with undefined $EXIT_CODE' \
2N/A "$(printf -- "$fmt" "$@")"
2N/A
2N/A error "$fmt" "$@"
2N/A exit $EXIT_CODE
2N/A}
2N/A
2N/Afunction fail_fatal {
2N/A typeset fmt="$1"
2N/A shift
2N/A
2N/A [[ -n "$fmt" ]] && error "$fmt" "$@"
2N/A EXIT_CODE=$ZONE_SUBPROC_FATAL
2N/A exit $ZONE_SUBPROC_FATAL
2N/A}
2N/A
2N/Afunction fail_tryagain {
2N/A typeset fmt="$1"
2N/A [[ -z "$fmt" ]] && fail_internal "format argument undefined"
2N/A
2N/A printf "ERROR: " 1>&2
2N/A printf -- "$@" 1>&2
2N/A printf "\n" 1>&2
2N/A EXIT_CODE=$ZONE_SUBPROC_TRYAGAIN
2N/A exit $ZONE_SUBPROC_TRYAGAIN
2N/A}
2N/A
2N/Afunction fail_usage {
2N/A #
2N/A # format is optional argument to fail_usage: do not fail_internal if
2N/A # none given
2N/A #
2N/A typeset fmt=$1
2N/A shift
2N/A [[ -n $fmt ]] && log "$fmt\n" "$@"
2N/A [[ -n $m_brnd_usage ]] && log "$m_brnd_usage"
2N/A [[ -z $m_usage ]] && fail_internal "m_usage undefined"
2N/A log "$m_usage\n"
2N/A EXIT_CODE=$ZONE_SUBPROC_USAGE
2N/A exit $ZONE_SUBPROC_USAGE
2N/A}
2N/A
2N/Afunction fail_unavailable {
2N/A typeset fmt="$1"
2N/A shift
2N/A
2N/A [[ -n "$fmt" ]] && error "$fmt" "$@"
2N/A EXIT_CODE=$ZONE_SUBPROC_UNAVAILABLE
2N/A exit $ZONE_SUBPROC_UNAVAILABLE
2N/A}
2N/A
2N/A#
2N/A# fail_internal is intended to be used in places where we are checking for
2N/A# logic errors, much as assert is used in C.
2N/A#
2N/Afunction fail_internal {
2N/A typeset fmt=$1
2N/A shift
2N/A backtrace "Unrecoverable internal error"
2N/A
2N/A error "$fmt" "$@"
2N/A EXIT_CODE=$ZONE_SUBPROC_FATAL
2N/A exit $ZONE_SUBPROC_FATAL
2N/A}
2N/A
2N/A#
2N/A# Send the provided printf()-style arguments to the screen and to the logfile.
2N/A#
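# Examples (as used by callers later in this file):
#
#	log "$installing"
#	log "$f_zfs_create" "${zone.rpool_ds}/export"
#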
2N/Afunction log {
2N/A typeset fmt="$1"
2N/A shift
2N/A [[ -z "$fmt" ]] && fail_internal "format argument undefined"
2N/A
2N/A printf -- "${MSG_PREFIX}${fmt}\n" "$@"
2N/A [[ -n $LOGFILE ]] && printf "[$(date)] ${MSG_PREFIX}${fmt}\n" "$@" >&2
2N/A}
2N/A
2N/A#
2N/A# Print provided text to the screen if the shell variable "OPT_V" is set.
2N/A# The text is always sent to the logfile.
2N/A#
2N/Afunction vlog {
2N/A typeset fmt="$1"
2N/A shift
2N/A [[ -z "$fmt" ]] && fail_internal "format argument undefined"
2N/A
2N/A [[ -n $OPT_V ]] && printf -- "${MSG_PREFIX}${fmt}\n" "$@"
2N/A [[ -n $LOGFILE ]] && printf "[$(date)] ${MSG_PREFIX}${fmt}\n" "$@" >&2
2N/A}
2N/A
2N/A# Check that zone is not in the ROOT dataset.
2N/Afunction fail_zonepath_in_rootds {
2N/A typeset gzrootds=$(get_ds_from_path /)
2N/A
2N/A if [[ -z $gzrootds ]]; then
2N/A #
2N/A # This is an internal error because we shouldn't have made it
2N/A # this far if the system wasn't ZFS root.
2N/A #
2N/A fail_internal "Unable to find dataset mounted at /"
2N/A fi
2N/A
2N/A [[ $1 == "$gzrootds"/* ]] && fail_fatal "$f_zfs_in_root"
2N/A}
2N/A
2N/A# Return success if system is labeled (aka Trusted Extensions).
2N/Afunction is_system_labeled {
2N/A [[ -x /bin/plabel ]] || return 1
2N/A /bin/plabel >/dev/null 2>&1 && return 0
2N/A return 1
2N/A}
2N/A
2N/A#
2N/A# Validate that the directory is safe.
2N/A#
2N/A# It is possible for a malicious zone root user to modify a zone's filesystem
2N/A# so that modifications made to the zone's filesystem by administrators in the
2N/A# global zone modify the global zone's filesystem. We can prevent this by
2N/A# ensuring that all components of paths accessed by scripts are real (i.e.,
2N/A# non-symlink) directories.
2N/A#
2N/A# NOTE: The specified path should be an absolute path as would be seen from
2N/A# within the zone. Also, this function does not check parent directories.
2N/A# If, for example, you need to ensure that every component of the path
2N/A# '/foo/bar/baz' is a directory and not a symlink, then do the following:
2N/A#
2N/A# safe_dir /foo
2N/A# safe_dir /foo/bar
2N/A# safe_dir /foo/bar/baz
2N/A#
2N/Afunction safe_dir {
2N/A typeset dir="$1"
2N/A
2N/A if [[ -h $ZONEROOT/$dir || ! -d $ZONEROOT/$dir ]]; then
2N/A fatal "$e_baddir" "$dir"
2N/A fi
2N/A}
2N/A
2N/A# Like safe_dir except the dir doesn't have to exist.
2N/Afunction safe_opt_dir {
2N/A typeset dir="$1"
2N/A
2N/A [[ ! -e $ZONEROOT/$dir ]] && return
2N/A
2N/A if [[ -h $ZONEROOT/$dir || ! -d $ZONEROOT/$dir ]]; then
2N/A fatal "$e_baddir" "$dir"
2N/A fi
2N/A}
2N/A
2N/A# Only make a copy if we haven't already done so.
2N/Afunction safe_backup {
2N/A typeset src="$1"
2N/A typeset dst="$2"
2N/A
2N/A if [[ ! -h $src && ! -h $dst && ! -d $dst && ! -f $dst ]]; then
2N/A /usr/bin/cp -p $src $dst || fatal "$e_badfile" "$src"
2N/A fi
2N/A}
2N/A
2N/A# Make a copy even if the destination already exists.
2N/Afunction safe_copy {
2N/A typeset src="$1"
2N/A typeset dst="$2"
2N/A
2N/A if [[ ! -h $src && ! -h $dst && ! -d $dst ]]; then
2N/A /usr/bin/cp -p $src $dst || fatal "$e_badfile" "$src"
2N/A fi
2N/A}
2N/A
2N/A# Make a recursive copy
2N/Afunction safe_copy_rec {
2N/A typeset src="$1"
2N/A typeset dst="$2"
2N/A
2N/A if [[ ! -h $src && ! -h $dst && ! -d $dst ]]; then
2N/A /usr/bin/cp -pr $src $dst || fatal "$e_badfile" "$src"
2N/A fi
2N/A}
2N/A
2N/A# Move a file
2N/Afunction safe_move {
2N/A typeset src="$1"
2N/A typeset dst="$2"
2N/A
2N/A if [[ ! -h $src && ! -h $dst && ! -d $dst ]]; then
2N/A /usr/bin/mv $src $dst || fatal "$e_badfile" "$src"
2N/A fi
2N/A}
2N/A
2N/Afunction safe_rm {
2N/A if [[ ! -h $ZONEROOT/$1 && -f $ZONEROOT/$1 ]]; then
2N/A rm -f "$ZONEROOT/$1"
2N/A fi
2N/A}
2N/A
2N/A#
2N/A# Replace the file with a wrapper pointing to the native brand code.
2N/A# However, we only do the replacement if the file hasn't already been
2N/A# replaced with our wrapper. This function expects the cwd to be the
2N/A# location of the file we're replacing.
2N/A#
# Some of the files we're replacing are hardlinks to isaexec, so we need to
# 'rm' the file before we set up the wrapper, while others are hardlinks to
# rc scripts that we need to maintain.
2N/A#
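# Example (sketch; the command, wrapper script, mode and ownership shown here
# are hypothetical - actual callers pass brand-specific values):
#
#	cd "$ZONEROOT/usr/sbin" || fail_internal "cannot cd to zone root"
#	safe_replace ./some_cmd "$wrapper_script" 0555 root:sys remove \
#	    "$nativedir"
#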
2N/Afunction safe_replace {
2N/A typeset filename="$1"
2N/A typeset runname="$2"
2N/A typeset mode="$3"
2N/A typeset own="$4"
2N/A typeset rem="$5"
2N/A typeset nativedir="$6"
2N/A
2N/A if [ -h $filename -o ! -f $filename ]; then
2N/A return
2N/A fi
2N/A
2N/A egrep -s "Solaris Brand Replacement" $filename
2N/A if [ $? -eq 0 ]; then
2N/A return
2N/A fi
2N/A
2N/A safe_backup $filename $filename.pre_p2v
2N/A if [ $rem = "remove" ]; then
2N/A rm -f $filename
2N/A fi
2N/A
2N/A cat <<-END >$filename || exit 1
2N/A #!/bin/sh -p
2N/A #
2N/A # Solaris Brand Replacement
2N/A #
2N/A # Attention. This file has been replaced with a new version for
2N/A # use in a virtualized environment. Modification of this script is not
2N/A # supported and all changes will be lost upon reboot. The
2N/A # {name}.pre_p2v version of this file is a backup copy of the
2N/A # original and should not be deleted.
2N/A #
2N/A END
2N/A
2N/A echo "__S10_BRAND_NATIVE_PATH=$nativedir;" >>$filename || exit 1
2N/A echo ". $runname \"\$@\"" >>$filename || exit 1
2N/A
2N/A chmod $mode $filename
2N/A chown $own $filename
2N/A}
2N/A
2N/Afunction safe_wrap {
2N/A typeset filename="$1"
2N/A typeset runname="$2"
2N/A typeset mode="$3"
2N/A typeset own="$4"
2N/A
2N/A if [ -f $filename ]; then
2N/A log "$e_cannot_wrap" "$filename"
2N/A exit 1
2N/A fi
2N/A
2N/A cat <<-END >$filename || exit 1
2N/A #!/bin/sh
2N/A #
2N/A # Solaris Brand Wrapper
2N/A #
2N/A # Attention. This file has been created for use in a
2N/A # virtualized environment. Modification of this script
2N/A # is not supported and all changes will be lost upon reboot.
2N/A #
2N/A END
2N/A
2N/A echo ". $runname \"\$@\"" >>$filename || exit 1
2N/A
2N/A chmod $mode $filename
2N/A chown $own $filename
2N/A}
2N/A
2N/A#
2N/A# Read zonecfg fs entries and save the relevant data, one entry per
2N/A# line.
2N/A# This assumes the properties from the zonecfg output, e.g.:
2N/A# fs:
2N/A# dir: /opt
2N/A# special: /opt
2N/A# raw not specified
2N/A# type: lofs
2N/A# options: [noexec,ro,noatime]
2N/A#
2N/A# and it assumes the order of the fs properties as above.
2N/A#
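# For the example entry above this appends a line of the form
#
#	/opt lofs /opt noexec,ro,noatime
#
# to $fstmpfile.
#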
2N/Afunction get_fs_info {
2N/A zonecfg -z $ZONENAME info fs | nawk '{
2N/A if ($1 == "options:") {
2N/A # Remove brackets.
2N/A options=substr($2, 2, length($2) - 2);
2N/A printf("%s %s %s %s\n", dir, type, special, options);
2N/A } else if ($1 == "dir:") {
2N/A dir=$2;
2N/A } else if ($1 == "special:") {
2N/A special=$2;
2N/A } else if ($1 == "type:") {
2N/A type=$2
2N/A }
2N/A }' >> $fstmpfile
2N/A}
2N/A
2N/A#
2N/A# Mount zonecfg fs entries into the zonepath.
2N/A#
2N/Afunction mnt_fs {
2N/A if [ ! -s $fstmpfile ]; then
2N/A return;
2N/A fi
2N/A
2N/A # Sort the fs entries so we can handle nested mounts.
2N/A sort $fstmpfile | nawk -v zonepath=$ZONEPATH '{
2N/A if (NF == 4)
2N/A options="-o " $4;
2N/A else
2N/A options=""
2N/A
2N/A # Create the mount point. Ignore errors since we might have
2N/A # a nested mount with a pre-existing mount point.
2N/A cmd="/usr/bin/mkdir -p " zonepath "/root" $1 " >/dev/null 2>&1"
2N/A system(cmd);
2N/A
2N/A cmd="/usr/sbin/mount -F " $2 " " options " " $3 " " \
2N/A zonepath "/root" $1;
2N/A if (system(cmd) != 0) {
2N/A printf("command failed: %s\n", cmd);
2N/A exit 1;
2N/A }
2N/A }' >>$LOGFILE
2N/A}
2N/A
2N/A#
2N/A# Unmount zonecfg fs entries from the zonepath.
2N/A#
2N/Afunction umnt_fs {
2N/A if [[ -z $fstmpfile || ! -s $fstmpfile ]]; then
2N/A return
2N/A fi
2N/A
2N/A # Reverse sort the fs entries so we can handle nested unmounts.
2N/A sort -r "$fstmpfile" | nawk -v zonepath=$ZONEPATH '{
2N/A cmd="/usr/sbin/umount " zonepath "/root" $1
2N/A if (system(cmd) != 0) {
2N/A printf("command failed: %s\n", cmd);
2N/A }
2N/A }' >>$LOGFILE
2N/A}
2N/A
2N/A#
# get_ds_from_path path [outvar]
2N/A#
2N/A# Find the dataset mounted at a given path. The implementation is tolerant
2N/A# of the path not being an exact match of the entry in /etc/mnttab (e.g. an
2N/A# extra / somewhere) but the supplied path must be a mount point of a ZFS
2N/A# dataset. If a second argument is provided, it must be the name of a variable
2N/A# into which the result will be stored.
2N/A#
2N/A# On success, returns 0. In the one argument form, the dataset is printed
2N/A# to stdout. In the two argument form, the dataset name is stored in the
2N/A# variable by the name of the second argument.
2N/A# If no match is found, returns 1.
2N/A#
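# Example (sketch):
#
#	typeset zpds
#	get_ds_from_path "$ZONEPATH" zpds || fatal "$f_no_ds"
#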
2N/Afunction get_ds_from_path {
2N/A typeset path=$1
2N/A typeset dsn mountpoint
2N/A
2N/A /usr/sbin/zfs list -H -o name,mountpoint "$path" 2>/dev/null | \
2N/A IFS=$'\t' read dsn mountpoint
2N/A [[ -z $dsn || -z $mountpoint ]] && return 1
2N/A
2N/A # If mountpoint=legacy, consult /etc/mnttab.
2N/A if [[ $mountpoint == legacy ]]; then
2N/A mountpoint=$(nawk -F$'\t' -v "dsn=$dsn" \
2N/A '$1 == dsn { print $2; exit }' /etc/mnttab)
2N/A [[ -z $mountpoint ]] && return 1
2N/A fi
2N/A
2N/A [[ $mountpoint -ef $path ]] || return 1
2N/A
2N/A if [[ -n "$2" ]]; then
2N/A typeset -n res=$2
2N/A res=$dsn
2N/A else
2N/A print -- "$dsn"
2N/A fi
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# create_zone_rpool [-e] [-r] zone
2N/A#
2N/A# Establishes the standard dataset hierarchy under <zonepath>/rpool
2N/A#
2N/A# Arguments:
2N/A# -e Do not create rpool/export hierarchy
2N/A# -r Do not create rpool/ROOT or rpool/export hierarchy
2N/A# zone a zone structure created with init_zone
2N/A#
2N/A# Globals:
2N/A#
2N/A# EXIT_CODE Set to ZONE_SUBPROC_FATAL if any dataset that could collide
2N/A# with a future install or attach operation is created.
2N/A#
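# On success with no options, the hierarchy below the zonepath dataset looks
# roughly like this:
#
#	<zonepath dataset>/rpool		zoned=on, mountpoint=/rpool
#	<zonepath dataset>/rpool/ROOT		canmount=noauto, mountpoint=legacy
#	<zonepath dataset>/rpool/export		mountpoint=/export
#	<zonepath dataset>/rpool/export/home
#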
2N/Afunction create_zone_rpool {
2N/A typeset opt
2N/A typeset -i skipexport=0
2N/A typeset -i skipROOT=0
2N/A
2N/A while getopts :er opt; do
2N/A case $opt in
2N/A e) skipexport=1 ;;
2N/A r) skipROOT=1 ;;
2N/A ?) fail_internal "$f_int_bad_opt" "$OPTARG" ;;
2N/A esac
2N/A done
2N/A shift $(( OPTIND - 1 ))
2N/A case $# in
2N/A 0) fail_internal "$f_int_missing_arg" zonepath_dataset ;;
2N/A 1) typeset -n zone=$1 ;;
2N/A *) shift;
2N/A fail_internal "$f_int_bad_arg" "$*"
2N/A ;;
2N/A esac
2N/A
2N/A #
2N/A # rpool
2N/A #
2N/A typeset ds
2N/A init_dataset ds "${zone.rpool_ds}" >/dev/null 2>&1
2N/A if (( $? != 0 )); then
		zfs create -o zoned=on -o mountpoint=/rpool \
		    "${zone.rpool_ds}" || {
			log "$f_zfs_create" "${zone.rpool_ds}"
2N/A return 1
2N/A }
2N/A else
2N/A zfs_set zoned=on ds || return 1
2N/A zfs_set canmount=on ds || return 1
2N/A zfs_set mountpoint=/rpool ds || return 1
2N/A # Dealing with existing rpool. Perhaps someone intentionally
2N/A # got rid of export dataset. We shouldn't make it come back.
2N/A (( skipexport = 1 ))
2N/A fi
2N/A
2N/A if (( $skipROOT != 0 )); then
2N/A return 0
2N/A fi
2N/A
2N/A #
2N/A # If the zone configuration already has export in it, don't create it.
	# The message that zonecfg_has_export logs when export already exists
	# is inappropriate for this function, so send it to the bit bucket.
2N/A #
2N/A zonecfg_has_export zone >/dev/null 2>&1 && (( skipexport = 1 ))
2N/A
2N/A #
2N/A # rpool/ROOT
2N/A #
2N/A init_dataset ds "${zone.ROOT_ds}" >/dev/null 2>&1
2N/A if (( $? != 0 )); then
2N/A zfs create -o canmount=noauto -o mountpoint=legacy \
2N/A "${zone.ROOT_ds}" || {
2N/A log "$f_zfs_create" "${zone.ROOT_ds}"
2N/A return 1
2N/A }
2N/A else
2N/A zfs inherit zoned "${zone.ROOT_ds}"
2N/A typeset prop
2N/A for prop in canmount=noauto mountpoint=legacy; do
2N/A zfs_set "$prop" ds || return 1
2N/A done
2N/A fi
2N/A # zfs_set doesn't do -r so use zfs command directly.
2N/A zfs set -r canmount=noauto "${zone.ROOT_ds}" || return 1
2N/A
2N/A #
2N/A # rpool/export
2N/A #
2N/A if (( skipexport == 0 )); then
2N/A zfs create -o mountpoint=/export "${zone.rpool_ds}/export" || {
2N/A log "$f_zfs_create" "${zone.rpool_ds}/export"
2N/A return 1
2N/A }
2N/A EXIT_CODE=$ZONE_SUBPROC_FATAL
2N/A zfs create "${zone.rpool_ds}/export/home" || {
2N/A log "$f_zfs_create" "${zone.rpool_ds}/export/home"
2N/A return 1
2N/A }
2N/A fi
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# create_active_ds [-s snapshot] [-r] zone
2N/A#
2N/A# Set up ZFS dataset hierarchy for the zone root dataset and the datasets
2N/A# listed in zone.new_be_datasets. If an active dataset is being created
2N/A# as a clone of another BE (that is, -s is used), the caller is responsible for
2N/A# cloning any child datasets and zone.new_be_datasets is ignored.
2N/A#
2N/A# Arguments and options:
2N/A#
2N/A# -s snapshot If specified, the active dataset is cloned from this snapshot.
2N/A# With -s, the caller is responsible for cloning any child
2N/A# datasets. That is, /var is not created automatically.
2N/A# -r If specified, only create rpool, not ROOT, export or zbe
2N/A# zone zone structure created with init_zone.
2N/A#
2N/A# Globals:
2N/A#
2N/A# EXIT_CODE Set to ZONE_SUBPROC_FATAL if any ZBE or rpool/export is
2N/A# created.
2N/A#
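# Example (sketch): a clone operation might call
#
#	create_active_ds -s "$snapshot" zone || fatal "$f_no_active_ds"
#
# which clones the snapshot into the first free BE name (e.g.
# ${zone.ROOT_ds}/solaris-0) and activates it.
#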
2N/Afunction create_active_ds {
2N/A typeset snapshot opt
2N/A typeset -i skipROOT=0
2N/A
2N/A while getopts :s:r opt; do
2N/A case $opt in
2N/A s) snapshot="$OPTARG" ;;
2N/A r) skipROOT=1 ;;
2N/A ?) fail_internal "$f_int_bad_opt" "$OPTARG" ;;
2N/A esac
2N/A done
2N/A shift $(( OPTIND - 1 ))
2N/A case $# in
2N/A 0) fail_internal "$f_int_missing_arg" zone ;;
2N/A 1) : ;;
2N/A *) fail_internal "$f_int_bad_arg" "$*" ;;
2N/A esac
2N/A typeset -n zone=$1
2N/A
2N/A #
2N/A # Find the zone's current dataset. This should have been created by
2N/A # zoneadm.
2N/A #
2N/A [[ -z "${zone.path.ds}" ]] && fail_fatal "$f_no_ds"
2N/A
2N/A # Check that zone is not in the ROOT dataset.
2N/A fail_zonepath_in_rootds "${zone.path.ds}"
2N/A
2N/A #
2N/A # Create the zone's rpool, rpool/ROOT, rpool/export, etc. If creating
2N/A # from a snapshot (part of cloning process) assume the caller has
2N/A # already created it.
2N/A #
2N/A if [[ -z "$snapshot" ]]; then
2N/A if (( $skipROOT == 1 )); then
2N/A # Does not set EXIT_CODE.
2N/A create_zone_rpool -r zone || return 1
2N/A return 0
2N/A fi
2N/A # Sets EXIT_CODE.
2N/A create_zone_rpool zone || return 1
2N/A fi
2N/A
2N/A #
2N/A # Create the new active dataset with "zfs create" or "zfs clone",
2N/A # depending on whether a snapshot was passed. If the create or clone
2N/A # operation fails 100 times, it's likely it will never succeed.
2N/A #
2N/A typeset bename dsname
2N/A typeset -a zfs_prop_options
2N/A #
2N/A # mountpoint=/ is safe because create_zone_rpool verifies zoned=on
2N/A # for parent.
2N/A #
2N/A typeset -a be_props
2N/A set -A be_props -- -o canmount=noauto -o mountpoint=/
2N/A
2N/A typeset -a sl_opt
2N/A if is_system_labeled; then
2N/A # On TX, reset the mlslabel upon cloning
2N/A set -A sl_opt -- -o mlslabel=none
2N/A fi
2N/A
2N/A typeset -i i
2N/A typeset be_prefix
2N/A if [[ ${zone.brand} == "solaris10" ]]; then
2N/A be_prefix=zbe
2N/A else
2N/A be_prefix=solaris
2N/A fi
2N/A for (( i = 0 ; i < 100 ; i++ )); do
2N/A bename=$(printf "%s-%d" "$be_prefix" $i)
2N/A dsname="${zone.ROOT_ds}/$bename"
2N/A
2N/A if [[ -n "$snapshot" ]]; then
2N/A vlog "Cloning active_ds $dsname from $snapshot"
2N/A zfs clone "${be_props[@]}" "${sl_opt[@]}" "$snapshot" \
2N/A "$dsname" >/dev/null 2>&1 && break
2N/A else
2N/A vlog "Creating active_ds $dsname"
2N/A zfs create "${be_props[@]}" "$dsname" \
2N/A >/dev/null 2>&1 && break
2N/A fi
2N/A bename=
2N/A dsname=
2N/A done
2N/A [[ -z $bename ]] && return 1
2N/A EXIT_CODE=$ZONE_SUBPROC_FATAL
2N/A
2N/A # If clone wasn't used, create the child datasets, if any.
2N/A if [[ -z $snapshot ]]; then
2N/A typeset child
2N/A for child in ${zone.new_be_datasets[@]}; do
2N/A vlog "Creating child dataset: %s" "$child"
2N/A zfs create -o mountpoint="/$child" -o canmount=noauto \
2N/A "$dsname/$child" || return 1
2N/A done
2N/A fi
2N/A
2N/A # Activate the BE.
2N/A set_active_be zone "$bename" || return 1
2N/A}
2N/A
2N/A#
2N/A# Perform validation and cleanup in the zoneroot after unpacking the archive.
2N/A#
2N/Afunction post_unpack {
2N/A #
2N/A # Check if the image was created with a valid libc.so.1.
2N/A #
2N/A hwcap=$(moe -v -32 $ZONEROOT/lib/libc.so.1 2>&1)
2N/A if (( $? != 0 )); then
2N/A vlog "$f_hwcap_info" "$hwcap"
2N/A fail_fatal "$f_sanity_hwcap"
2N/A fi
2N/A
2N/A ( cd "$ZONEROOT" && \
2N/A find . \( -type b -o -type c \) -exec rm -f "{}" \; )
2N/A
2N/A # Create directories that, when missing, befuddle zoneadm mount
2N/A # and various SMF services running in the zone.
2N/A typeset -A dirs
2N/A set -A dirs[tmp] 1777 root sys
2N/A set -A dirs[dev] 755 root sys
2N/A set -A dirs[etc/zones] 755 root sys
2N/A typeset dir
2N/A for dir in ${!dirs[@]}; do
2N/A safe_opt_dir /$dir
2N/A [[ -d $ZONEROOT/$dir ]] && continue
2N/A typeset mode=${dirs[$dir][0]}
2N/A typeset user=${dirs[$dir][1]}
2N/A typeset group=${dirs[$dir][2]}
2N/A mkdir -m "$mode" "$ZONEROOT/$dir" ||
2N/A fatal "$f_mkdir" "$ZONEROOT/$dir"
2N/A chown "$user:$group" "$ZONEROOT/$dir" ||
2N/A fatal "$f_chown" "$ZONEROOT/$dir"
2N/A done
2N/A}
2N/A
2N/A#
2N/A# Determine flar compression style from identification file.
2N/A#
2N/Afunction get_compression {
2N/A typeset ident=$1
2N/A typeset line=$(grep "^files_compressed_method=" $ident)
2N/A
2N/A print ${line##*=}
2N/A}
2N/A
2N/A#
2N/A# Determine flar archive style from identification file.
2N/A#
2N/Afunction get_archiver {
2N/A typeset ident=$1
2N/A typeset line=$(grep "^files_archived_method=" $ident)
2N/A
2N/A print ${line##*=}
2N/A}
2N/A
2N/A#
2N/A# Get the archive base.
2N/A#
2N/A# We must unpack the archive in the right place within the zonepath so
2N/A# that files are installed into the various mounted filesystems that are set
2N/A# up in the zone's configuration. These are already mounted for us by the
2N/A# mntfs function.
2N/A#
2N/A# Archives can be made of either a physical host's root file system or a
2N/A# zone's zonepath. For a physical system, if the archive is made using an
2N/A# absolute path (/...) we can't use it. For a zone the admin can make the
2N/A# archive from a variety of locations;
2N/A#
2N/A# a) zonepath itself: This will be a single dir, probably named with the
2N/A# zone name, it will contain a root dir and under the root we'll see all
#	zone name.  It will contain a root dir, and under the root we'll see
#	all the top level dirs: etc, var, usr...  We must be above the ZONEPATH
#	when we unpack the archive, but this will only work if the archive's
2N/A# this is an error.
2N/A#
2N/A# b) inside the zonepath: We'll see root and it will contain all the top
2N/A# level dirs; etc, var, usr.... We must be in the ZONEPATH when we unpack
2N/A# the archive.
2N/A#
2N/A# c) inside the zonepath root: We'll see all the top level dirs, ./etc,
2N/A# ./var, ./usr.... This is also the case we see when we get an archive
#	of a physical system.  We must be in ZONEROOT when we unpack the archive.
2N/A#
2N/A# Note that there can be a directory named "root" under the ZONEPATH/root
2N/A# directory.
2N/A#
2N/A# This function handles the above possibilities so that we reject absolute
2N/A# path archives and figure out where in the file system we need to be to
2N/A# properly unpack the archive into the zone. It sets the ARCHIVE_BASE
# variable to the location where the archive should be unpacked.
2N/A#
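# In summary, the cases above map to:
#
#	(a) archive of the zonepath itself:	ARCHIVE_BASE=$(dirname $ZONEPATH)
#	(b) made inside the zonepath:		ARCHIVE_BASE=$ZONEPATH
#	(c) made inside the zonepath root:	ARCHIVE_BASE=$ZONEROOT
#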
2N/Afunction get_archive_base {
2N/A stage1=$1
2N/A archive=$2
2N/A stage2=$3
2N/A
2N/A vlog "$m_analyse_archive"
2N/A
2N/A base=$($stage1 $archive | $stage2 2>/dev/null | nawk -F/ '{
2N/A # Check for an absolute path archive
2N/A if (substr($0, 1, 1) == "/")
2N/A exit 1
2N/A
2N/A if ($1 != ".")
2N/A dirs[$1] = 1
2N/A else
2N/A dirs[$2] = 1
2N/A }
2N/A END {
2N/A for (d in dirs) {
2N/A cnt++
2N/A if (d == "bin") sawbin = 1
2N/A if (d == "etc") sawetc = 1
2N/A if (d == "root") sawroot = 1
2N/A if (d == "var") sawvar = 1
2N/A }
2N/A
2N/A if (cnt == 1) {
2N/A # If only one top-level dir named root, we are in the
2N/A # zonepath, otherwise this must be an archive *of*
2N/A # the zonepath so print the top-level dir name.
2N/A if (sawroot)
2N/A print "*zonepath*"
2N/A else
2N/A for (d in dirs) print d
2N/A } else {
2N/A # We are either in the zonepath or in the zonepath/root
2N/A # (or at the top level of a full system archive which
2N/A # looks like the zonepath/root case). Figure out which
2N/A # one.
2N/A if (sawroot && !sawbin && !sawetc && !sawvar)
2N/A print "*zonepath*"
2N/A else
2N/A print "*zoneroot*"
2N/A }
2N/A }')
2N/A
2N/A if (( $? != 0 )); then
2N/A umnt_fs
2N/A fatal "$e_absolute_archive"
2N/A fi
2N/A
2N/A if [[ "$base" == "*zoneroot*" ]]; then
2N/A ARCHIVE_BASE=$ZONEROOT
2N/A elif [[ "$base" == "*zonepath*" ]]; then
2N/A ARCHIVE_BASE=$ZONEPATH
2N/A else
2N/A # We need to be in the dir above the ZONEPATH but we need to
2N/A # validate that $base matches the final component of ZONEPATH.
2N/A bname=$(basename $ZONEPATH)
2N/A
2N/A if [[ "$bname" != "$base" ]]; then
2N/A umnt_fs
2N/A fatal "$e_mismatch_archive" "$base" "$bname"
2N/A fi
2N/A ARCHIVE_BASE=$(dirname $ZONEPATH)
2N/A fi
2N/A}
2N/A
2N/A#
2N/A# Unpack cpio archive into zoneroot.
2N/A#
2N/Afunction install_cpio {
2N/A stage1=$1
2N/A archive=$2
2N/A
2N/A get_archive_base "$stage1" "$archive" "cpio -it"
2N/A
2N/A cpioopts="-idmP@/fE $fscpiofile"
2N/A
2N/A vlog "cd \"$ARCHIVE_BASE\" && $stage1 \"$archive\" | cpio $cpioopts"
2N/A
2N/A # Ignore errors from cpio since we expect some errors depending on
2N/A # how the archive was made.
2N/A ( cd "$ARCHIVE_BASE" && $stage1 "$archive" | cpio $cpioopts )
2N/A
2N/A post_unpack
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# Unpack pax archive into zoneroot.
2N/A#
2N/Afunction install_pax {
2N/A typeset archive=$1
2N/A typeset filtopt
2N/A
2N/A get_archive_base "cat" "$archive" "pax"
2N/A
2N/A if [[ -n $fspaxfile && -s $fspaxfile ]]; then
2N/A filtopt="-c $(/usr/bin/cat $fspaxfile)"
2N/A fi
2N/A
2N/A vlog "cd \"$ARCHIVE_BASE\" && pax -r@/ -p e -f \"$archive\" $filtopt"
2N/A
2N/A # Ignore errors from pax since we expect some errors depending on
2N/A # how the archive was made.
2N/A ( cd "$ARCHIVE_BASE" && pax -r@/ -p e -f "$archive" $filtopt )
2N/A
2N/A post_unpack
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# Unpack UFS dump into zoneroot.
2N/A#
2N/Afunction install_ufsdump {
2N/A archive=$1
2N/A
2N/A vlog "cd \"$ZONEROOT\" && ufsrestore rf \"$archive\""
2N/A
2N/A #
2N/A # ufsrestore goes interactive if you ^C it. To prevent that,
2N/A # we make sure its stdin is not a terminal.
2N/A #
2N/A ( cd "$ZONEROOT" && ufsrestore rf "$archive" < /dev/null )
2N/A result=$?
2N/A
2N/A post_unpack
2N/A
2N/A return $result
2N/A}
2N/A
2N/A#
2N/A# Copy directory hierarchy into zoneroot.
2N/A#
2N/Afunction install_dir {
2N/A source_dir=$1
2N/A
2N/A cpioopts="-pPdm@/"
2N/A
2N/A first=1
2N/A filt=$(for i in $(cat $fspaxfile)
2N/A do
2N/A echo $i | egrep -s "/" && continue
2N/A if [[ $first == 1 ]]; then
2N/A printf "^%s" $i
2N/A first=0
2N/A else
2N/A printf "|^%s" $i
2N/A fi
2N/A done)
2N/A
2N/A list=$(cd "$source_dir" && ls -d * | egrep -v "$filt")
2N/A flist=$(for i in $list
2N/A do
2N/A printf "%s " "$i"
2N/A done)
2N/A findopts="-xdev ( -type d -o -type f -o -type l ) -print"
2N/A
2N/A vlog "cd \"$source_dir\" && find $flist $findopts | "
2N/A vlog "cpio $cpioopts \"$ZONEROOT\""
2N/A
2N/A # Ignore errors from cpio since we expect some errors depending on
2N/A # how the archive was made.
2N/A ( cd "$source_dir" && find $flist $findopts | \
2N/A cpio $cpioopts "$ZONEROOT" )
2N/A
2N/A post_unpack
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# This is a common function for laying down a zone image from a variety of
2N/A# different sources. This can be used to either install a fresh zone or as
2N/A# part of zone migration during attach.
2N/A#
2N/A# The first argument specifies the type of image: archive, directory or stdin.
2N/A# The second argument specifies the image itself. In the case of stdin, the
2N/A# second argument specifies the format of the stream (cpio, flar, etc.).
2N/A# Any validation or post-processing on the image is done elsewhere.
2N/A#
2N/A# This function calls a 'sanity_check' function which must be provided by
2N/A# the script which includes this code.
2N/A#
2N/A# Returns:
2N/A# Returns 0 on success. Always exits on failure.
2N/A#
2N/A# Globals:
2N/A# EXIT_CODE Set to ZONE_SUBPROC_UNAVAILABLE on successful return.
2N/A#
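# Example (sketch; the archive path is hypothetical):
#
#	install_image zone archive /export/archives/myzone.cpio.gz
#
# or, for a stream on stdin whose format is already known to be a ZFS send
# stream:
#
#	install_image zone stdin zfs
#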
2N/Afunction install_image {
2N/A typeset -n zone=$1
2N/A typeset intype=$2
2N/A typeset insrc=$3
2N/A
2N/A if [[ -z ${zone.name} || -z $intype || -z $insrc ]]; then
2N/A fail_internal "Missing argument to install_image. Got: '%s'" \
2N/A "$*"
2N/A fi
2N/A
2N/A typeset filetype="unknown"
2N/A typeset filetypename="unknown"
	typeset filetypeprefix=
2N/A typeset stage1="cat"
2N/A
2N/A if [[ "$intype" == "directory" ]]; then
2N/A if [[ "$insrc" == "-" ]]; then
2N/A # Indicates that the existing zonepath is prepopulated.
2N/A filetype="existing"
2N/A filetypename="existing"
2N/A else
2N/A if [[ "$(echo $insrc | cut -c 1)" != "/" ]]; then
2N/A fatal "$e_path_abs" "$insrc"
2N/A fi
2N/A
2N/A if [[ ! -e "$insrc" ]]; then
2N/A log "$e_not_found" "$insrc"
2N/A fatal "$e_install_abort"
2N/A fi
2N/A
2N/A if [[ ! -r "$insrc" ]]; then
2N/A log "$e_not_readable" "$insrc"
2N/A fatal "$e_install_abort"
2N/A fi
2N/A
2N/A if [[ ! -d "$insrc" ]]; then
2N/A log "$e_not_dir"
2N/A fatal "$e_install_abort"
2N/A fi
2N/A
2N/A sanity_check $insrc
2N/A
2N/A filetype="directory"
2N/A filetypename="directory"
2N/A fi
2N/A
2N/A else
2N/A # Common code for both archive and stdin stream.
2N/A
2N/A if [[ "$intype" == "archive" ]]; then
2N/A if [[ $insrc != /* ]]; then
2N/A log "$e_path_abs" "$insrc"
2N/A fatal "$e_install_abort"
2N/A elif [[ ! -f "$insrc" ]]; then
2N/A log "$e_not_found" "$insrc"
2N/A fatal "$e_install_abort"
2N/A fi
2N/A ftype="$(LC_ALL=C file "$insrc" | cut -d: -f 2)"
2N/A
2N/A #
2N/A # If it is a compressed stream, extract the first
2N/A # megabyte into a temporary file to figure out what
2N/A # kind of data is in the file.
2N/A #
		case "$ftype" in
		*bzip2*)
			stage1=bzcat
			filetypeprefix="bzipped "
			;;
		*gzip*)
			stage1=gzcat
			filetypeprefix="gzipped "
			;;
		esac
2N/A
2N/A if [[ $stage1 != cat ]]; then
2N/A typeset tastefile=$(mktemp)
2N/A [[ -n $tastefile ]] || fatal "$e_tmpfile"
2N/A
2N/A "$stage1" "$insrc" | dd of=$tastefile \
2N/A bs=1024k count=1 2>/dev/null
2N/A ftype="$(LC_ALL=C file "$tastefile" \
2N/A | cut -d: -f 2)"
2N/A rm -f "$tastefile"
2N/A fi
2N/A elif [[ $intype == stdin ]]; then
2N/A # For intype == stdin, the insrc parameter specifies
2N/A # the stream format coming on stdin.
2N/A ftype="$insrc"
2N/A insrc="-"
2N/A else
2N/A fail_internal "intype '%s' is invalid" "$intype"
2N/A fi
2N/A
2N/A
2N/A # Setup vars for the archive type we have.
2N/A case "$ftype" in
2N/A *cpio*) filetype="cpio"
2N/A filetypename="cpio archive"
2N/A ;;
2N/A *ufsdump*)
2N/A if [[ ${zone.brand} != solaris10 ]]; then
2N/A log "$e_unsupported_archive" "$ftype" \
2N/A "${zone.brand}"
2N/A fatal "$e_install_abort"
2N/A fi
2N/A filetype="ufsdump"
2N/A filetypename="ufsdump archive"
2N/A ;;
2N/A flar|flash|*Flash\ Archive*)
2N/A if [[ ${zone.brand} != solaris10 ]]; then
2N/A log "$e_unsupported_archive" "$ftype" \
2N/A "${zone.brand}"
2N/A fatal "$e_install_abort"
2N/A fi
2N/A filetype="flar"
2N/A filetypename="flash archive"
2N/A ;;
2N/A tar|*USTAR\ tar\ archive)
2N/A filetype="tar"
2N/A filetypename="tar archive"
2N/A ;;
2N/A pax|*USTAR\ tar\ archive\ extended\ format*)
2N/A filetype="xustar"
2N/A filetypename="pax (xustar) archive"
2N/A ;;
2N/A zfs|*ZFS\ snapshot\ stream*)
2N/A filetype="zfs"
2N/A filetypename="ZFS send stream"
2N/A ;;
2N/A *) log "$e_unsupported_archive" "$ftype" "${zone.brand}"
2N/A fatal "$e_install_abort"
2N/A ;;
2N/A esac
2N/A fi
2N/A
2N/A # compressed archives only supported for cpio and zfs
2N/A if [[ $stage1 != cat ]]; then
		filetypename="${filetypeprefix}$filetypename"
2N/A if [[ $filetype != cpio && $filetype != zfs ]]; then
2N/A log "$e_unsupported_archive" "$filetypename" \
2N/A "${zone.brand}"
2N/A fatal "$e_install_abort"
2N/A fi
2N/A fi
2N/A
2N/A vlog "$filetypename"
2N/A
2N/A if [[ $filetype != @(existing|zfs|flar) ]]; then
2N/A #
2N/A # Since we're not using a pre-existing ZFS dataset layout, or
2N/A # an archive containing a dataset layout, create the zone
2N/A # datasets and mount them.
2N/A #
2N/A
2N/A # Sets EXIT_CODE.
2N/A create_active_ds zone || fatal "$f_no_active_ds"
2N/A mount_active_be -c zone || fatal "$f_mount_active_be"
2N/A
2N/A # If the brand supports candidate zbes, tag this as a candidate.
2N/A if [[ -n $PROP_CANDIDATE ]]; then
2N/A zfs set "$PROP_CANDIDATE=on" "${zone.active_ds}" ||
2N/A fatal "$e_zfs_set" "$PROP_CANDIDATE=on" \
2N/A "${zone.active_ds}"
2N/A fi
2N/A fi
2N/A
2N/A fstmpfile=$(/usr/bin/mktemp -t -p /var/tmp)
2N/A if [[ -z "$fstmpfile" ]]; then
2N/A fatal "$e_tmpfile"
2N/A fi
2N/A
2N/A # Make sure we always have the files holding the directories to filter
2N/A # out when extracting from a CPIO or PAX archive. We'll add the fs
2N/A # entries to these files in get_fs_info()
2N/A fscpiofile=$(/usr/bin/mktemp -t -p /var/tmp fs.cpio.XXXXXX)
2N/A if [[ -z "$fscpiofile" ]]; then
2N/A rm -f $fstmpfile
2N/A fatal "$e_tmpfile"
2N/A fi
2N/A
2N/A # Filter out these directories.
2N/A cat >>$fscpiofile <<-EOF
2N/A dev/*
2N/A devices/*
2N/A devices
2N/A proc/*
2N/A tmp/*
2N/A var/run/*
2N/A system/contract/*
2N/A system/object/*
2N/A system/volatile/*
2N/A rpool/boot/*
2N/A rpool/boot
2N/A rpool/etc/*
2N/A rpool/etc
2N/A EOF
2N/A
2N/A fspaxfile=$(/usr/bin/mktemp -t -p /var/tmp fs.pax.XXXXXX)
2N/A if [[ -z "$fspaxfile" ]]; then
2N/A rm -f $fstmpfile $fscpiofile
2N/A fatal "$e_tmpfile"
2N/A fi
2N/A
2N/A print -n "dev devices proc tmp var/run system/contract system/object" \
2N/A "system/volatile rpool/boot rpool/etc" >>$fspaxfile
2N/A
2N/A # Set up any fs mounts so the archive will install into the correct locations.
2N/A if [[ $filetype != @(existing|zfs|flar) ]]; then
2N/A get_fs_info
2N/A mnt_fs
2N/A if (( $? != 0 )); then
2N/A umnt_fs >/dev/null 2>&1
2N/A rm -f $fstmpfile $fscpiofile $fspaxfile
2N/A fatal "$mount_failed"
2N/A fi
2N/A fi
2N/A
2N/A if [[ $filetype == existing ]]; then
2N/A if [[ -z ${zone.zbe_cloned_from} ]]; then
2N/A log "$no_installing"
2N/A else
2N/A log "$from_clone" "${zone.zbe_cloned_from}"
2N/A fi
2N/A else
2N/A log "$installing"
2N/A fi
2N/A
2N/A #
2N/A # Install the image into the zonepath.
2N/A #
2N/A unpack_result=0
2N/A if [[ "$filetype" == "cpio" ]]; then
2N/A install_cpio "$stage1" "$insrc"
2N/A unpack_result=$?
2N/A
2N/A elif [[ "$filetype" == "flar" ]]; then
2N/A # Sets EXIT_CODE.
2N/A $stage1 $insrc | install_flar zone
2N/A unpack_result=$?
2N/A
2N/A elif [[ "$filetype" == "xustar" ]]; then
2N/A install_pax "$insrc"
2N/A unpack_result=$?
2N/A
2N/A elif [[ "$filetype" == "tar" ]]; then
2N/A vlog "cd \"${zone.root}\" && tar -xf \"$insrc\""
2N/A # Ignore errors from tar since we expect some errors depending
2N/A # on how the archive was made.
2N/A ( cd "${zone.root}" && tar -xf "$insrc" )
2N/A unpack_result=0
2N/A post_unpack
2N/A
2N/A elif [[ "$filetype" == "ufsdump" ]]; then
2N/A install_ufsdump "$insrc"
2N/A unpack_result=$?
2N/A
2N/A elif [[ "$filetype" == "directory" ]]; then
2N/A install_dir "$insrc"
2N/A unpack_result=$?
2N/A
2N/A elif [[ "$filetype" == "zfs" ]]; then
2N/A # Sets EXIT_CODE.
2N/A extract_zfs zone "$stage1" "$insrc"
2N/A unpack_result=$?
2N/A fi
2N/A
2N/A # Clean up any fs mounts used during unpacking.
2N/A umnt_fs
2N/A rm -f $fstmpfile $fscpiofile $fspaxfile
2N/A
2N/A chmod 700 "${zone.path}"
2N/A
2N/A (( unpack_result != 0 )) && fatal "$f_unpack_failed"
2N/A
2N/A #
2N/A # We are now far enough along that the admin may be able to fix up an
2N/A # extracted/copied image in the event that an attach fails. Instead of
2N/A # deleting the new datasets, mark them as pinned so the error path
2N/A # doesn't delete them. Spit a message if pinning fails, but don't
2N/A # abort the operation.
2N/A #
2N/A if [[ $filetype != existing ]]; then
2N/A pin_datasets "${zone.path.ds}" || error "$f_pin"
2N/A fi
2N/A EXIT_CODE=$ZONE_SUBPROC_UNAVAILABLE
2N/A
2N/A # Verify this is a valid image.
2N/A mount_active_be -C zone
2N/A sanity_check "${zone.root}"
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# extract_zfs zone filter file
2N/A#
2N/A# Receive the zfs stream from the specified file. The stream is passed through
2N/A# the specified filter, such as gzcat or bzcat. If no filter is needed,
2N/A# use "cat" as the filter. zone should have been initialized by init_zone.
2N/A# File can be a regular file or /dev/stdin.
2N/A#
2N/A# On successful creation, the active ZBE is mounted on the zone root.
2N/A#
2N/A# Globals:
2N/A#
2N/A# EXIT_CODE Set to ZONE_SUBPROC_FATAL while temporary extraction dataset
2N/A# exists. Set to ZONE_SUBPROC_UNAVAILABLE on success.
2N/A#
2N/A# Returns the return value from "zfs receive". May exit with fatal errors.
2N/A#
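# Example (sketch; the stream path is hypothetical):
#
#	extract_zfs zone gzcat /export/archives/myzone.zfs.gz
#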
2N/Afunction extract_zfs {
2N/A typeset -n zone=$1
2N/A typeset stage1=$2
2N/A typeset insrc=$3
2N/A
2N/A #
2N/A # Receive the stream into a temporary dataset then move the datasets
2N/A # into place. Be careful while doing this that the recieved datasets
2N/A # don't get mounted.
2N/A #
2N/A zfs create -o zoned=on "${zone.path.ds}/installtmp" || fatal "$f_no_ds"
2N/A
2N/A #
2N/A # Be sure that an uninstall is forced if we are interrupted before
2N/A # the install or attach completes.
2N/A #
2N/A EXIT_CODE=$ZONE_SUBPROC_FATAL
2N/A
2N/A typeset -a cmd
2N/A set -A cmd zfs receive -F -u -x zoned "${zone.path.ds}/installtmp/ds"
2N/A
2N/A vlog "$stage1 $insrc | ${cmd[*]}"
2N/A # $stage1 intentionally not quoted to support commands with arguments.
2N/A $stage1 "$insrc" | "${cmd[@]}"
2N/A typeset unpack_result=$?
2N/A
2N/A if ! rationalize_datasets zone "${zone.path.ds}/installtmp/ds"; then
2N/A zfs destroy "${zone.path.ds}/installtmp" || \
2N/A error "$e_zfs_destroy" "${zone.path.ds}/installtmp"
2N/A #
2N/A # If rationalize_datasets returned early because it could not
2N/A # find an active ZBE, it will have set EXIT_CODE to
2N/A # ZONE_SUBPROC_TRYAGAIN In such a case, pin the rationalized
2N/A # datasets so that cleanup functions don't remove them. Also,
2N/A # avoid the call to fatal(), as that would give an improper
2N/A # error message. Set the state to unavailable so that another
2N/A # extraction can't be tried until the existing one is cleaned
2N/A # up via uninstall.
2N/A #
2N/A if [[ $EXIT_CODE == $ZONE_SUBPROC_TRYAGAIN ]]; then
2N/A EXIT_CODE=$ZONE_SUBPROC_UNAVAILABLE
2N/A pin_datasets ${zone.rpool_ds} && exit $EXIT_CODE
2N/A #
2N/A # Could not pin datasets, so they will probably
2N/A # get destroyed by the exit handler.
2N/A #
2N/A fail_fatal "$f_pin"
2N/A fi
2N/A fatal "$f_invalid_data"
2N/A fi
2N/A
2N/A #
2N/A # If the destroy fails, this will trigger a failure in
2N/A # zoneadm verify.
2N/A #
2N/A zfs destroy "${zone.path.ds}/installtmp" || \
2N/A fatal "$e_zfs_destroy" "${zone.path.ds}/installtmp"
2N/A
2N/A [[ $unpack_result == 0 ]] && EXIT_CODE=$ZONE_SUBPROC_UNAVAILABLE
2N/A
2N/A return $unpack_result
2N/A}
2N/A
2N/A#
2N/A# rationalize_datasets zone dsname
2N/A#
2N/A# dsname is the top-level dataset that should contain at least a BE and maybe
2N/A# other zone datasets in an unknown hierarchy. rationalize_datasets looks
2N/A# through the dataset and data hierarchy found there to find a way to
2N/A# shoehorn it into the proper dataset layout.
2N/A#
2N/A# Upon successful conversion:
2N/A#
2N/A# - $dsname will no longer exist
2N/A# - zoned, mountpoint, canmount will have been set properly
2N/A# - the active BE will have been determined and mounted.
2N/A# - 0 is returned
2N/A#
2N/A# If the rationalization fails, 1 is returned and the zone and $dsname are in
# an undetermined state.
2N/A#
2N/A# EXIT_CODE May be set as described by discover_active_be(), which is
2N/A# brand-specific.
2N/A#
2N/Afunction rationalize_datasets {
2N/A typeset -n zone=$1
2N/A typeset topdsn=$2
2N/A typeset dsn
2N/A
2N/A typeset rpooldsn ROOTdsn
2N/A
2N/A #
2N/A # This forms the logic of guessing where things are at.
2N/A #
2N/A
2N/A # Build an associative array of datasets in the source area.
2N/A typeset -A dsa
2N/A get_datasets -A "$topdsn" dsa || return 1
2N/A
2N/A #
2N/A # Look for any of the following dataset layouts. The layouts are
	# described from the perspective of the source system.  In the table,
	# $rootpool refers to the root pool, typically "rpool", on the
	# source system.  $zpds refers to the zone path dataset - the dataset
2N/A # mounted at the zonepath.
2N/A #
2N/A # Source Layout Dataset provided as arg to zfs send Check# Notes
2N/A # ------ ----------- ----------------------------------- ------ -----
2N/A # GZ global $rootpool 2 1
2N/A # GZ global $rootpool/ROOT 3 1
2N/A # GZ global $rootpool/ROOT/$be 4 2
2N/A # NGZ s10 $zpds 5 2
2N/A # NGZ s11x2010.11 $zpds 1 1
2N/A # NGZ s11 $zpds/rpool 2 1
2N/A # NGZ any !s10 $zpds/rpool/ROOT 3 1
2N/A # $zpds/ROOT 1
2N/A # NGZ any $zpds/rpool/ROOT/$be 4 2
2N/A # $zpds/ROOT/$be 2
2N/A #
2N/A # The Layout column refers to the following dataset hierarchies:
2N/A #
	# GZ global: Same across s10 and s11, except s11 adds
2N/A # rpool/export/home
2N/A # rpool
2N/A # rpool/ROOT
2N/A # rpool/ROOT/$be
2N/A # rpool/export
2N/A #
2N/A # NGZ s10:
2N/A # $zpds (A single BE exists in the "root" subdirectory.)
2N/A #
2N/A # NGZ s11express: Applies to Solaris 11 Express 2010.11.
2N/A # $zpds
2N/A # $zpds/ROOT
2N/A # $zpds/ROOT/$be
2N/A #
2N/A # NGZ s11:
2N/A # $zpds
2N/A # $zpds/rpool
2N/A # $zpds/rpool/ROOT
2N/A # $zpds/rpool/ROOT/$be
2N/A # $zpds/rpool/export
2N/A # $zpds/rpool/export/home
2N/A #
2N/A # any:
2N/A # Any of the above layouts are supported.
2N/A #
2N/A # any !s10:
2N/A # Any of the above layouts except NGZ s10 are supported.
2N/A #
2N/A # Notes:
2N/A #
2N/A # 1. The archive must be created with "zfs send -R", "zfs send -r",
2N/A # or "zfs send -rc". Note that "zfs send -r" first appears in
2N/A # Solaris 11.
2N/A # 2. The archive may be created with any of the options specified
2N/A # in Note 1 (assuming support in that Solaris release) or
2N/A # without the -R or -r[c] options to zfs send.
2N/A #
2N/A
2N/A # Check 1
2N/A if [[ -n ${dsa[$topdsn/rpool]} && \
2N/A -n ${dsa[$topdsn/rpool/ROOT]} ]]; then
2N/A rpooldsn=$topdsn/rpool
2N/A ROOTdsn=$topdsn/rpool/ROOT
2N/A # Check 2
2N/A elif [[ -n ${dsa[$topdsn/ROOT]} ]]; then
2N/A rpooldsn=$topdsn
2N/A ROOTdsn=$topdsn/ROOT
2N/A # Check for 3, 4, and 5 - We need to mount it to figure it out.
2N/A else
2N/A typeset dir=$(mktemp -d)
2N/A # We know that it is zoned from the way it was received.
2N/A zfs set canmount=noauto "$topdsn" && \
2N/A zfs set mountpoint=/ $topdsn ||
2N/A vlog "Unable to set properties for mounting %s" "$topdsn"
2N/A if zfs_tmpmount "$topdsn" "$dir" >/dev/null 2>&1; then
2N/A if [[ -d $dir/usr && -d $dir/var && -d $dir/etc ]]
2N/A then
2N/A # Looks like the inside of a BE (Check 4)
2N/A rpooldsn=
2N/A ROOTdsn=$(dirname "$topdsn")
2N/A elif [[ ${zone.brand} == "solaris10" && \
2N/A -d $dir/root/usr && -d $dir/root/var && \
2N/A -d $dir/root/etc ]]; then
2N/A # Looks like an S10 zonepath dataset (Check 5)
2N/A rpooldsn=
2N/A ROOTdsn=$(dirname "$topdsn")
2N/A # Fix it to look like Check 4, above.
2N/A convert_s10_zonepath_to_be "$dir" || {
2N/A umount -f "$dir" >/dev/null 2>&1
2N/A return 1
2N/A }
2N/A else
2N/A # Must be a ROOT at $topdsn (Check 3)
2N/A rpooldsn=
2N/A ROOTdsn=$topdsn
2N/A fi
2N/A umount -f "$dir" >/dev/null 2>&1
2N/A else
2N/A # Must be a ROOT at $topdsn (Check 3)
2N/A rpooldsn=
2N/A ROOTdsn=$topdsn
2N/A fi
2N/A rmdir "$dir" >/dev/null 2>&1
2N/A fi
2N/A
	# Create rpool and rpool/ROOT if they don't already exist.
2N/A create_zone_rpool -e zone || return 1
2N/A
2N/A if [[ -n $rpooldsn ]]; then
2N/A #
2N/A # Now look for datasets that collide
2N/A #
		typeset -a collide_ds
		typeset -a move_ds
		typeset -A seen_ds
		typeset -i collide=0
2N/A /usr/sbin/zfs list -H -o name -t filesystem,volume -r \
2N/A "$rpooldsn" | while read dsn; do
2N/A [[ $dsn == "$rpooldsn" ]] && continue
2N/A [[ $dsn == "$rpooldsn"/ROOT ]] && continue
2N/A
2N/A # dataset name relative to rpooldsn
2N/A typeset rdsn=${dsn#$rpooldsn/}
2N/A
2N/A if /usr/sbin/zfs list "${zone.rpool_ds}/$rdsn" \
2N/A >/dev/null 2>&1; then
2N/A #
2N/A # keep track of collisions that can be deleted
2N/A # for possible removal in reverse order
2N/A #
2N/A if ds_empty "${zone.rpool_ds}/$rdsn"; then
2N/A a_push collide_ds "$rdsn"
2N/A continue
2N/A fi
2N/A log "$e_ds_conflict" "$rdsn" \
2N/A "${zone.rpool_ds}/$rdsn"
2N/A (( collide++ ))
2N/A continue
2N/A fi
2N/A
2N/A #
2N/A # ZBEs will be handled below, as they need to be
2N/A # tagged all at once after any colliding ZBEs are
2N/A # renamed.
2N/A #
2N/A [[ $dsn == "$rpooldsn"/ROOT/* ]] && continue
2N/A
2N/A #
2N/A # If the parent of this dataset has already been added
2N/A # to the move list (or not added because its parent was
2N/A # added), it will be moved with its parent. Don't try
2N/A # to move it after it is already gone.
2N/A #
2N/A seen_ds[$rdsn]=1
2N/A [[ -n ${seen_ds[$(dirname "$rdsn")]} ]] && continue
2N/A
2N/A a_push move_ds "$rdsn"
2N/A done
2N/A
2N/A if (( collide != 0 )); then
2N/A return 1
2N/A fi
2N/A
2N/A for dsn in "${collide_ds[@]}"; do
2N/A vlog "Removing empty dataset '%s' due to collision" \
2N/A "$dsn"
2N/A zfs destroy "$dsn"
2N/A done
2N/A
2N/A for dsn in "${move_ds[@]}"; do
2N/A vlog "Dataset '%s' received from archive" \
2N/A "${zone.rpool_ds}/$dsn"
2N/A zfs rename "$rpooldsn/$dsn" "${zone.rpool_ds}/$dsn" ||
2N/A return 1
2N/A done
2N/A fi
2N/A
2N/A #
2N/A # The zone's rpool dataset (if any) has been migrated, except for the
2N/A # be(s) found in $ROOTdsn. Merge them in with any existing zbes.
2N/A #
2N/A typeset -a bes
2N/A typeset be newbe
2N/A typeset -A zone.allowed_bes # used by discover_active_be
2N/A typeset -A allowed_bes
2N/A typeset -A bemap
2N/A typeset -i i
2N/A typeset be_prefix
2N/A if [[ ${zone.brand} == "solaris10" ]]; then
2N/A be_prefix=zbe
2N/A else
2N/A be_prefix=solaris
2N/A fi
2N/A tag_candidate_zbes "$ROOTdsn" bes allowed_bes || return 1
2N/A for be in "${bes[@]}"; do
2N/A # Use the next available BE name
2N/A for (( i = 0 ; i < 100 ; i++ )); do
2N/A newbe=$be_prefix-$i
2N/A
2N/A #
2N/A # Try to claim this BE. If it fails, it probably means
2N/A # that there is already a BE by that name and we should
2N/A # try to claim the next one.
2N/A #
2N/A if /usr/sbin/zfs rename "$ROOTdsn/$be" \
2N/A "${zone.ROOT_ds}/$newbe" >/dev/null 2>&1; then
2N/A break
2N/A fi
2N/A newbe=
2N/A done
2N/A if [[ -z $newbe ]]; then
2N/A error "$e_be_move_failed" "$be"
2N/A return 1
2N/A fi
2N/A
2N/A [[ -n ${allowed_bes[$be]} ]] && zone.allowed_bes[$newbe]=1
2N/A done
2N/A #
2N/A # The only datasets that may still exist under $topdsn are $rpooldsn
2N/A # and $ROOTdsn. Verify that. If it all checks out, destroy $topdsn
2N/A # and all of its children and call it a day.
2N/A #
2N/A typeset -i errors=0 delete_topdsn=0
2N/A /usr/sbin/zfs list -H -o name -t filesystem,volume "$topdsn" \
2N/A 2>/dev/null | while read dsn; do
2N/A # Assuming there are no errors, $topdsn should be deleted.
2N/A (( delete_topdsn=1 ))
2N/A [[ $dsn == "$topdsn" ]] && continue
2N/A [[ $dsn == "$rpooldsn" ]] && continue
2N/A [[ $dsn == "$ROOTdsn" ]] && continue
2N/A log "$e_unexpected_ds" "${dsn#$topdsn/}"
2N/A (( errors++ ))
2N/A done
2N/A (( errors == 0 )) || return 1
2N/A
2N/A (( delete_topdsn )) && zfs destroy -r "$topdsn"
2N/A
2N/A fix_zone_rpool_props zone || return 1
2N/A
2N/A #
2N/A # If the zone doesn't have /export, set up the /export dataset
2N/A # hierarchy. Since this isn't strictly necessary for the zone
2N/A # to work, do not fail the attach if creation fails.
2N/A #
2N/A if ! zonecfg_has_export zone && ! /usr/sbin/zfs list -H -o name \
	    "${zone.rpool_ds}/export" >/dev/null 2>&1; then
2N/A vlog "Creating /export"
2N/A if ! zfs create -o mountpoint=/export "${zone.rpool_ds}/export"
2N/A then
2N/A log "$f_zfs_create" "${zone.rpool_ds}/export"
2N/A else
2N/A zfs create "${zone.rpool_ds}/export/home" ||
2N/A log "$f_zfs_create" "${zone.rpool_ds}/export/home"
2N/A fi
2N/A fi
2N/A
2N/A # May set EXIT_CODE.
2N/A discover_active_be zone || return 1
2N/A mount_active_be -C zone || return 1
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# ds_empty <datasetname>
2N/A#
2N/A# Returns 0 if the dataset has no snapshots and there are no files or
2N/A# directories in the dataset. Assumes the dataset is a filesystem.
2N/A#
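# Example (sketch):
#
#	if ds_empty "${zone.rpool_ds}/export"; then
#		vlog "Dataset %s appears unused" "${zone.rpool_ds}/export"
#	fi
#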
2N/Afunction ds_empty {
2N/A typeset dsn="$1"
2N/A
2N/A # If any snapshots or descendant datasets exist, it's not empty.
2N/A typeset -i children
2N/A children=$(/usr/sbin/zfs list -Hr -d 1 -t snapshot -o name "$dsn" | \
2N/A awk 'END {print NR}')
2N/A if (( children > 1 )); then
2N/A vlog "Dataset %s has %d snapshots" "$dsn" $(( children - 1 ))
2N/A return 1
2N/A fi
2N/A
2N/A #
2N/A # If it's already mounted, look inside it. Be careful not to descend
2N/A # into datasets mounted on it to avoid false positives. Note that we
2N/A # ignore mount points for already mounted datasets. This is important
2N/A # for the case where an empty BE that contains a separate /var is
2N/A # mounted.
2N/A #
2N/A if [[ "$(zfs list -H -o mounted "$dsn")" == yes ]]; then
2N/A typeset mntpt
2N/A mntpt=$(zfs list -H -o mountpoint "$dsn") || return 1
2N/A
2N/A # Only look at the first line or two to see if it is empty.
2N/A # If it contains only directories, those are likely
2N/A # mountpoints or ancestors of mountpoints.
2N/A find "$mntpt" -mount -type d -o -print | awk 'NR > 1 { exit 1 }
2N/A END { if (NR == 0) { exit 0 } else { exit 1}}'
2N/A if (( $? != 0 )); then
2N/A vlog "Dataset %s mounted at %s is not empty" "$dsn" \
2N/A "$mntpt"
2N/A return 1
2N/A fi
2N/A return 0
2N/A fi
2N/A
2N/A
2N/A # Mount it to see if there are any files in it
2N/A typeset dir=$(mktemp -d)
2N/A zfs_tmpmount "$dsn" "$dir" || {
2N/A vlog "Unable to mount dataset %s on %s" "$dsn" "$dir"
2N/A rmdir "$dir" || \
2N/A vlog "Unable to clean up temporary directory at %s" "$dir"
2N/A return 1
2N/A }
2N/A
2N/A typeset contents
2N/A contents=$(ls -A "$dir")
2N/A umount "$dir" && rmdir $dir || \
2N/A vlog "Unable to clean up temporary mount of %s at %s" "$dsn" "$dir"
2N/A [[ -n "$contents" ]] && return 1
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# fix_zone_rpool_props zone
2N/A#
2N/A# Troll through the zone's rpool dataset and fix the properties on datasets
2N/A# such that the BE's are likely to mount.
2N/A#
2N/Afunction fix_zone_rpool_props {
2N/A typeset -n zone=$1
2N/A
2N/A vlog "Fixing properties on zone datasets"
2N/A
2N/A zfs set zoned=on "${zone.rpool_ds}"
2N/A typeset dsn
2N/A zfs list -H -o name -d 1 -t filesystem,volume "${zone.rpool_ds}" | \
2N/A while read dsn; do
2N/A [[ $dsn == "${zone.rpool_ds}" ]] && continue
2N/A zfs inherit -r zoned $dsn || return 1
2N/A done
2N/A
2N/A typeset be_dsn=
2N/A zfs list -H -o name -t filesystem "${zone.ROOT_ds}" | \
2N/A while read dsn; do
2N/A [[ $dsn == "${zone.ROOT_ds}" ]] && continue
2N/A if [[ $dsn != "${zone.ROOT_ds}"/*/* ]]; then
2N/A be_dsn=$dsn
2N/A else
2N/A # Fix mountpoint .../zbe-0/var -> /var
2N/A zfs set mountpoint=${dsn#$be_dsn} "$dsn" || return 1
2N/A fi
2N/A zfs set canmount=noauto "$dsn" || return 1
2N/A done
2N/A}
2N/A
2N/A#
2N/A# attach_datasets -m install_media -t install_type zone
2N/A#
2N/A# Attaches datasets then performs any required installation tasks.
2N/A#
2N/A# Options and arguments
2N/A#
2N/A# -m install_media If install_media is '-', attempt to find a ZBE to
2N/A# attach. The selection is performed by the brand's
2N/A# discover_active_be() function.
2N/A# -t install_type Can be any value accepted by install_image.
2N/A# zone zone structure initialized by init_zone.
2N/A#
2N/A# Globals
2N/A# EXIT_CODE Depending on the level of success, may be set to
2N/A# ZONE_SUBPROC_UNAVAILABLE (returns 0),
2N/A# ZONE_SUBPROC_FATAL (returns 1, datasets partially
2N/A# extracted)
2N/A#
2N/A# Return values
2N/A#
2N/A# 0 on success, else exits.
2N/A# Exits with failure if:
2N/A# - zonepath is in the global zone's ROOT dataset.
2N/A# - active BE could not be found
2N/A# - the ZFS properties on the active BE could not be set
2N/A# - the active BE could not be mounted on the zoneroot
2N/A#
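# Example (sketch): to reattach a zone using a ZBE discovered on its own
# datasets, a brand attach script might call
#
#	attach_datasets -m - -t directory zone
#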
2N/Afunction attach_datasets {
2N/A typeset opt
2N/A typeset install_media= inst_type=
2N/A while getopts :m:t: opt; do
2N/A case $opt in
2N/A m) install_media=$OPTARG ;;
2N/A t) inst_type=$OPTARG ;;
2N/A ?) fail_internal "$f_int_bad_opt" "$OPTARG" ;;
2N/A esac
2N/A done
2N/A [[ -z "$install_media" ]] && fail_internal "$f_int_missing_opt" m
2N/A [[ -z "$inst_type" ]] && fail_internal "$f_int_missing_opt" t
2N/A shift $(( OPTIND - 1 ))
2N/A case $# in
2N/A 0) fail_internal "$f_int_missing_arg" "zone" ;;
2N/A 1) : ;;
2N/A *) fail_internal "$f_int_bad_arg" "$*" ;;
2N/A esac
2N/A typeset -n zone=$1
2N/A
2N/A # Validate that the zonepath is not in the root dataset.
2N/A fail_zonepath_in_rootds "${zone.path.ds}"
2N/A
2N/A #
2N/A # Fix mountpoint and other properties for ZBEs detached using the
2N/A # old scheme.
2N/A #
2N/A if ! convert_old_detached_zbes zone; then
2N/A # So long as any failed conversions didn't leave anything
2N/A # mounted on the zone root, allow the attach to continue.
2N/A get_ds_from_path "${zone.root}" && fatal "$f_detach_convert"
2N/A fi
2N/A
2N/A if [[ "$install_media" == "-" ]]; then
2N/A discover_active_be zone || return 1
2N/A elif [[ $inst_type == zbe ]]; then
2N/A claim_zbe zone "$install_media" || return 1
2N/A inst_type=directory
2N/A install_media=-
2N/A fi
2N/A
2N/A #
2N/A # The zone's datasets are now in place.
2N/A #
2N/A # Sets EXIT_CODE.
2N/A install_image zone "$inst_type" "$install_media"
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# claim_zbe zone
2N/A#
2N/A# This function exists here only to give a clear error message in the event
2N/A# that attach_datasets() calls brand-specific functionality not appropriate
2N/A# to this brand. Brands that support claim_zbe() must define it.
2N/A#
2N/A# claim_zbe will be called if zoneadm is invoked as
2N/A#
2N/A# zoneadm -z <zone> attach -z <zbe>
2N/A#
2N/A# As such, any brand that doesn't support -z should have bailed before calling
2N/A# attach_datasets.
2N/A#
2N/Afunction claim_zbe {
2N/A # If we make it to here, it is programmer error.
	fail_internal "%s is not defined for this brand" "$0"
2N/A}
2N/A
2N/A#
# convert_s10_zonepath_to_be dir
2N/A#
2N/A# This function exists here only to give a clear error message in the event
2N/A# that attach_datasets() calls brand-specific functionality not appropriate
2N/A# to this brand.
2N/A#
2N/Afunction convert_s10_zonepath_to_be {
2N/A # Anyone that has called this from common code should have already
2N/A # checked the brand.
2N/A fail_internal "$s10_zbe_not_supported"
2N/A}
2N/A
2N/A#
2N/A# tag_candidate_zbes ROOTdsn [be_array_name [curgz_assoc_array_name]]
2N/A#
2N/A# This generic function only returns the list of zbes found in the specified
2N/A# dataset. A brand-specific function may exist for brands that have more
2N/A# sophisticated zbe management needs.
2N/A#
2N/A# ROOTdsn The name of a dataset that contains zbes.
2N/A# be_array_name If specified, this variable will contain an array
2N/A# of candidate zbes on return.
2N/A# curgz_assoc_array_name Only used by some brands, not implemented in this
2N/A# implementation. Intended to return the list zbes
2N/A# associated with the current global zone in an
2N/A# associative array.
2N/A#
2N/A# Returns 0 if all went well and at least one zbe exists, else 1.
2N/A#
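# Example (sketch):
#
#	typeset -a bes
#	tag_candidate_zbes "$ROOTdsn" bes || return 1
#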
2N/Afunction tag_candidate_zbes {
2N/A (( $# < 2 )) && return 0
2N/A
2N/A typeset ROOTdsn=$1
2N/A typeset -n bes=$2
2N/A
2N/A typeset dsn
2N/A /usr/sbin/zfs list -H -o name -r -d 1 -t filesystem "$ROOTdsn" \
2N/A 2>/dev/null | while read dsn; do
2N/A [[ $dsn == "$ROOTdsn" ]] && continue
2N/A a_push bes "$(basename "$dsn")"
2N/A done
	if (( ${#bes[@]} == 0 )); then
		error "$e_no_active_be"
		return 1
	fi
	return 0
2N/A}
2N/A
2N/A#
2N/A# convert_old_detached_zbes zone
2N/A#
2N/A# Earlier releases left detached datasets mounted on the zone root. This
2N/A# function cleans those up, if needed.
2N/A#
2N/A# Arguments:
2N/A#
2N/A# zone zone structure initialized by init_zone.
2N/A#
2N/A# Return:
2N/A#
2N/A# 0 Nothing unexpected happened. There is no longer anything mounted on
2N/A# the zone root
2N/A# 1 One or more ZBEs could not be converted.
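2N/A#
2N/A# Example (illustrative; mirrors the hedged error handling used by
2N/A# attach_datasets() above):
2N/A#
2N/A#	if ! convert_old_detached_zbes zone; then
2N/A#		# Continue only if nothing is left mounted on the zone root.
2N/A#		get_ds_from_path "${zone.root}" && fatal "$f_detach_convert"
2N/A#	fi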
2N/A#
2N/Afunction convert_old_detached_zbes {
2N/A typeset -n zone=$1
2N/A typeset retval=0
2N/A typeset first=true # Is this the first ZBE converted?
2N/A
2N/A #
2N/A # Look at each ZBE. Ignore zfs list result, as it is OK to call this
2N/A # on a zone that has no ZBEs
2N/A #
2N/A /usr/sbin/zfs list -H -o name,mountpoint -r -d 1 "${zone.ROOT_ds}" \
2N/A 2>/dev/null | while IFS=$'\t' read dsn zbe_mntpt; do
2N/A
2N/A # Skip the ROOT dataset
2N/A [[ $dsn == "${zone.ROOT_ds}" ]] && continue;
2N/A
2N/A #
2N/A # If the ZBE's mount point is already set to /, this doesn't
2N/A # look like a detached zbe. Because the currently configured
2N/A # zone root may be different than the zone root on some other
2N/A # host where this storage may have previously been presented,
2N/A # all the references to the zone root are based on the mount
2N/A # point of the BE's top level dataset rather than the currently
2N/A # configured zone root.
2N/A #
2N/A [[ $zbe_mntpt == / ]] && continue
2N/A
2N/A log "$m_convert_detached" "$(basename "$dsn")"
2N/A
2N/A #
2N/A # Before doing anything that causes unmounts, get a list of
2N/A # datasets that exist under the ZBE's top dataset, as well as
2N/A # their properties. This will be used when fixing up
2N/A # properties later.
2N/A #
2N/A typeset -a dsslist # indexed array of datasets in dsn
2N/A typeset -A dsnbydir # associative array indexed by mntpt
2N/A get_datasets -p "$dsn" dsslist || fatal "$f_no_active_ds"
2N/A typeset -i i errors=0
2N/A for (( i = 0; i < ${#dsslist[@]}; i++ )); do
2N/A typeset -n dss=dsslist[$i] # ref to current item
2N/A
2N/A # Ignore things that don't get mounted
2N/A [[ ${dss.props[type].value} == filesystem ]] || \
2N/A continue
2N/A
2N/A # figure out where it is mounted
2N/A mountpt=${dss.props[mountpoint].value}
2N/A
2N/A # Legacy mountpoints do not need to be fixed.
2N/A [[ $mountpt == legacy ]] && continue
2N/A
2N/A # Make mountpoint relative to BE root
2N/A if [[ $mountpt == "$zbe_mntpt" ]]; then
2N/A mountpt=/
2N/A elif [[ $mountpt == ${zbe_mntpt}/* ]]; then
2N/A mountpt=${mountpt#${zbe_mntpt}}
2N/A fi
2N/A if [[ -n ${dsnbydir[$mountpt]} ]]; then
2N/A error "$e_ds_mnt_multiply_defined" "$mountpt"
2N/A (( errors++ ))
2N/A mountpt=
2N/A fi
2N/A if [[ -n $mountpt ]]; then
2N/A dsnbydir[$mountpt]=$i
2N/A fi
2N/A done
2N/A
2N/A #
2N/A # Allow progression through all ZBEs, converting those that
2N/A # can be converted.
2N/A #
2N/A if (( errors != 0 )); then
2N/A retval=1
2N/A continue
2N/A fi
2N/A
2N/A if $first; then
2N/A first=false
2N/A # Set up proper attributes on the ROOT dataset.
2N/A typeset rootds rpoolds
2N/A init_dataset rpoolds "${zone.rpool_ds}"
2N/A init_dataset rootds "${zone.ROOT_ds}"
2N/A
2N/A #
2N/A # Unmount the BE so that we can fix up mounts. Note
2N/A # that if file systems are mounted with temporary mount
2N/A # points, the persistent mountpoint property is hidden.
2N/A #
2N/A unmount_be zone || return 1
2N/A
2N/A if ! zfs_set zoned=on rpoolds ||
2N/A ! zfs_set canmount=noauto rootds ||
2N/A ! zfs_set mountpoint=legacy rootds; then
2N/A # If datasets above ZBEs can't be fixed,
2N/A # return immediately. zfs_set has already
2N/A # given an error message.
2N/A return 1
2N/A fi
2N/A fi
2N/A
2N/A #
2N/A # Walk through any remaining datasets and fix mount points
2N/A #
2N/A typeset -a mntlist
2N/A get_sorted_subscripts dsnbydir mntlist
2N/A for dir in "${mntlist[@]}"; do
2N/A typeset -n ds=dsslist[${dsnbydir[$dir]}]
2N/A refresh_dataset ds
2N/A if ! fix_ds_mountpoint ds "$dir"; then
2N/A retval=1
2N/A fi
2N/A done
2N/A done
2N/A
2N/A return $retval
2N/A}
2N/A
2N/A#
2N/A# get_datasets [-A] [-t type] [-p] dataset array_name
2N/A#
2N/A# Updates indexed array (or associative array with -A) named array_name with
2N/A# the names of datasets found under the given dataset, including the given
2N/A# dataset. Use of an array generated with this function is preferable to "for
2N/A# ds in $(zfs list -r ...)" because this is tolerant of dataset names that
2N/A# contain spaces.
2N/A#
2N/A# Example:
2N/A#
2N/A# typeset -a array
2N/A# get_datasets [options] $dataset array
2N/A# for ds in "${array[@]}"; do
2N/A# ...
2N/A# done
2N/A#
2N/A# Returns 0 on success or 1 if dataset was not found.
2N/A# Note: No error messages are printed if no dataset is found.
2N/A#
2N/Afunction get_datasets {
2N/A #
2N/A # Option and argument processing
2N/A #
2N/A typeset opt var dstype=filesystem assoc
2N/A typeset -i getprops=0
2N/A while getopts :Apt: opt; do
2N/A case $opt in
2N/A A) assoc=1 ;;
2N/A p) getprops=1 ;;
2N/A t) dstype=$OPTARG ;;
2N/A ?) fail_internal "$f_int_bad_opt" "$OPTARG" ;;
2N/A esac
2N/A done
2N/A shift $(( $OPTIND - 1 ))
2N/A [[ -z "$1" ]] && fail_internal "$f_int_missing_arg" dataset
2N/A [[ -z "$2" ]] && fail_internal "$f_int_missing_arg" array
2N/A
2N/A typeset dataset="$1"
2N/A typeset -n array="$2"
2N/A unset array
2N/A [[ -n $assoc ]] && typeset -A array
2N/A
2N/A #
2N/A # Build the list of datasets
2N/A #
2N/A typeset ds
2N/A typeset -i index=0
2N/A /usr/sbin/zfs list -H -o name -t $dstype -r "$dataset" 2>/dev/null \
2N/A | while read ds; do
2N/A if (( getprops )); then
2N/A if [[ -n $assoc ]]; then
2N/A array[$ds]=
2N/A init_dataset "array[$ds]" "$ds"
2N/A else
2N/A array[$index]=
2N/A init_dataset "array[$index]" "$ds"
2N/A fi
2N/A else
2N/A if [[ -n $assoc ]]; then
2N/A array[$ds]=$ds
2N/A else
2N/A array[$index]="$ds"
2N/A fi
2N/A fi
2N/A (( index++ ))
2N/A done
2N/A
2N/A if (( index == 0 )); then
2N/A return 1
2N/A fi
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# snapshot_zone_rpool zone snapformat snapname
2N/A#
2N/A# Creates a recursive snapshot of the specified zone.
2N/A#
2N/A# Arguments
2N/A#
2N/A# zone A zone, initialized with init_zone.
2N/A# snapformat A printf-friendly string that includes %d in it.
2N/A# snapname Upon return, this variable will contain the name of the
2N/A# snapshot. This should be the name of the variable, without
2N/A# a $.
2N/A#
2N/A# Globals:
2N/A# PATH Must contain /sbin or /usr/sbin.
2N/A#
2N/A# Return
2N/A# 0 Success, $snapname can be trusted
2N/A# 1 Fail, $snapname may have garbage
2N/A# exit If an internal error occurs
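2N/A#
2N/A# Example (an illustrative sketch; "snapname" is an arbitrary variable name,
2N/A# "clone_%d" is only a sample snapshot format, and src/dst are zone
2N/A# structures initialized with init_zone):
2N/A#
2N/A#	typeset snapname
2N/A#	snapshot_zone_rpool src "clone_%d" snapname || return 1
2N/A#	clone_zone_rpool src dst "$snapname" || return 1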
2N/A#
2N/Afunction snapshot_zone_rpool {
2N/A #
2N/A # Option/Argument processing
2N/A #
2N/A [[ -z "$1" ]] && fail_internal "$f_int_missing_arg" zone
2N/A [[ -z "$2" ]] && fail_internal "$f_int_missing_arg" snapformat
2N/A [[ -z "$3" ]] && fail_internal "$f_int_missing_arg" snapname
2N/A typeset -n zone="$1"
2N/A typeset snap_fmt="$2"
2N/A typeset -n snapname=$3
2N/A
2N/A #
2N/A # Find a name that works for the snapshot
2N/A #
2N/A typeset rpool_ds=${zone.rpool_ds}
2N/A typeset -i i
2N/A for (( i=0; i < 100; i++ )); do
2N/A snapname=$(printf -- "$snap_fmt" $i)
2N/A zfs snapshot -r "$rpool_ds@$snapname" >/dev/null 2>&1 \
2N/A && return 0
2N/A done
2N/A
2N/A # No name found, fail
2N/A return 1
2N/A}
2N/A
2N/A#
2N/A# clone_zone_rpool srczone dstzone snapname
2N/A#
2N/A# Clones the active BE dataset and other non-BE datasets from one zone to
2N/A# another. If srczone and dstzone are the same zone, the only effect is that
2N/A# a new boot environment is created.
2N/A#
2N/A# Upon successful return, the specified snapshot will have been marked for
2N/A# deferred destruction with 'zfs destroy -d'.
2N/A#
2N/A# Options and Arguments
2N/A#
2N/A# srczone A zone structure, initialized with init_zone.
2N/A# dstzone A zone structure, initialized with init_zone.
2N/A# snapname The name of the snapshot (part after @) from
2N/A# snapshot_zone_rpool.
2N/A#
2N/A# Globals:
2N/A#
2N/A# EXIT_CODE Set to $ZONE_SUBPROC_FATAL if one or more datasets have been
2N/A# created but not cleaned up.
2N/A# Set to $ZONE_SUBPROC_TRYAGAIN if the operation failed and
2N/A# cleanup was successful.
2N/A#
2N/A# Return
2N/A# 0 Success
2N/A# 1 Fail
2N/A#
2N/Afunction clone_zone_rpool {
2N/A typeset -n s="$1" d="$2"
2N/A typeset snapname="$3"
2N/A
2N/A typeset -a dslist props
2N/A typeset -i propcnt=0
2N/A typeset dsname newdsname snap
2N/A typeset dss newdss # dataset structure
2N/A typeset -i clone_made clone_reqd
2N/A typeset -a sl_opt
2N/A
2N/A #
2N/A # When cloning a BE within a zone, s and d will refer to the same
2N/A # zone. create_active_ds will adjust d.active_ds, which is the
2N/A # same as s.active_ds. To be sure that cloning of the source BE's
2N/A	# child datasets happens, we need to remember what the initial active
2N/A # BE was.
2N/A #
2N/A typeset src_active_ds=${s.active_ds}
2N/A
2N/A #
2N/A # In order to see the persistent value of mountpoints, datasets
2N/A # must be unmounted.
2N/A #
2N/A # Note that this causes problems for cloning from snapshots, which is
2N/A # still awaiting implementation for non-native brands.
2N/A #
2N/A unmount_be s || return 1
2N/A get_datasets -t filesystem,volume "${s.rpool_ds}" dslist || return 1
2N/A
2N/A if is_system_labeled; then
2N/A # On TX, reset the mlslabel upon cloning
2N/A set -A sl_opt -- -o mlslabel=none
2N/A fi
2N/A
2N/A #
2N/A # If the source and destination are in different pools, the datasets
2N/A # will be copied with "zfs send | zfs recv". However, this is tricky.
2N/A #
2N/A # - If the destination zone already exists (i.e. in a different global
2N/A # zone BE), there may be ZBE collisions.
2N/A # - The source zone may itself be a clone of yet another zone. This
2N/A # implies the need for zfs send -[r]c.
2N/A # - The datasets within the source zone may have clones (e.g.
2N/A # rpool/export/a may have been cloned to rpool/export/b). These
2N/A # "internal clones" should be preserved in the destination zone.
2N/A #
2N/A # To deal with this, the following approach is used. This only applies
2N/A # to the cross-pool clone scenario.
2N/A #
2N/A # - <zpds>: (zonepath dataset) should have already been created by
2N/A # zoneadm.
2N/A # - <zpds>/rpool/ROOT/*: Datasets that are not part of the active
2N/A # ZBE have the snapshot that was specified by the snapname argument
2N/A # to this function deleted.
2N/A # - <zpds>/rpool: After the snapshot removal described above, the
2N/A # source zone's rpool dataset is copied to the destination pool
2N/A # at <zpds>/clonetmp.
2N/A #
2N/A # After the datasets are copied to the destination pool,
2N/A # rataionalize_datasets() is used to put them into place.
2N/A #
2N/A typeset -i xpool=0
2N/A [[ ${s.path.ds%%/*} != ${d.path.ds%%/*} ]] && xpool=1
2N/A
2N/A for dsname in "${dslist[@]}"; do
2N/A init_dataset dss "$dsname"
2N/A newdsname=${dss.name/${s.path.ds}/${d.path.ds}}
2N/A snap="${dss.name}@$snapname"
2N/A clone_made=0
2N/A clone_reqd=0
2N/A
2N/A # zvols are not supported inside of a boot environment
2N/A if [[ ${dss.name} == "${s.ROOT_ds}/"* ]]; then
2N/A typeset dstype
2N/A #
2N/A			# The following zfs call should only fail if someone
2N/A # is removing or renaming datasets while this is
2N/A # running. If someone is doing that, abort the clone
2N/A # operation because it's likely that something will
2N/A # break.
2N/A #
2N/A dstype=$(zfs get -H -o value type "${dss.name}") || \
2N/A return 1
2N/A if [[ $dstype == volume ]]; then
2N/A error "$e_volume_in_bootenv" "${dss.name}"
2N/A return 1
2N/A fi
2N/A fi
2N/A
2N/A #
2N/A # Filter through the datasets to throw away snapshots that
2N/A # will not be cloned. Set other flags that will be needed
2N/A # for post-processing.
2N/A #
2N/A case "${dss.name}" in
2N/A $src_active_ds) # Clone the active boot env
2N/A (( xpool )) && continue;
2N/A # The BE name need not be the same in src and dst.
2N/A # Find the first available BE name by cloning.
2N/A # Sets EXIT_CODE.
2N/A create_active_ds -s "$snap" d || return 1
2N/A (( clone_made=1 ))
2N/A newdsname=${dss.name/${src_active_ds}/${d.active_ds}}
2N/A ;;
2N/A $src_active_ds/*) # Clone the active boot env nested ds
2N/A (( xpool )) && continue;
2N/A # Rejigger the name to match the BE name picked above
2N/A newdsname=${dss.name/${src_active_ds}/${d.active_ds}}
2N/A (( clone_reqd=1 ))
2N/A ;;
2N/A ${s.ROOT_ds}/*) # Do not clone inactive BE
2N/A # If we are just creating a new BE in an existing zone,
2N/A # don't worry about this dataset.
2N/A [[ ${s.name} == ${d.name} ]] && continue
2N/A vlog "Not cloning %s: not part of source active BE" \
2N/A "$snap"
2N/A if (( xpool )); then
2N/A #
2N/A # Normally we allow deferred destroy of
2N/A # snapshots, just in case something (e.g. a
2N/A # backup) has a hold on a snapshot. We need
2N/A # to be a bit more stringent here, as this
2N/A # snapshot must not exist when the 'zfs send'
2N/A # starts so as to prevent copying inactive
2N/A # ZBEs to the clone zone.
2N/A #
2N/A zfs destroy $snap || return 1
2N/A fi
2N/A continue
2N/A ;;
2N/A *) # Clone everything else, if needed.
2N/A (( xpool )) && continue;
2N/A # If we are just creating a new BE in an existing zone,
2N/A # don't worry about this dataset.
2N/A [[ ${s.name} == ${d.name} ]] && continue
2N/A #
2N/A # It is possible that the destination zonepath already
2N/A # exists and is at least partially populated due to the
2N/A # same zone in some other boot environment. If non-BE
2N/A # datasets already exist, reuse them.
2N/A #
2N/A if /usr/sbin/zfs list "$newdsname" >/dev/null 2>&1; then
2N/A vlog "Not cloning %s: dataset already exists" \
2N/A "$newdsname"
2N/A continue
2N/A fi
2N/A ;;
2N/A esac
2N/A
2N/A if (( clone_made == 0 )); then
2N/A		/usr/sbin/zfs list "$newdsname" >/dev/null 2>&1
2N/A if (( $? == 0 && clone_reqd )); then
2N/A error "$e_dataset_exists" "$newdsname"
2N/A return 1
2N/A fi
2N/A vlog "Cloning $snap to $newdsname"
2N/A zfs clone "${sl_opt[@]}" "$snap" "$newdsname" ||
2N/A return 1
2N/A EXIT_CODE=$ZONE_SUBPROC_FATAL
2N/A (( clone_made=1 ))
2N/A fi
2N/A
2N/A #
2N/A # Force the zone's rpool to be zoned and everything else
2N/A # to inherit the zoned property.
2N/A #
2N/A init_dataset newdss "$newdsname"
2N/A if [[ $newdsname == "${d.rpool_ds}" ]]; then
2N/A zfs_set zoned=on newdss || return 1
2N/A else
2N/A zfs inherit zoned "$newdsname" || return 1
2N/A fi
2N/A #
2N/A # Locally set properties to match those found on the source.
2N/A #
2N/A typeset prop
2N/A zoned_src=${newdss.props[zoned].source}
2N/A for prop in mountpoint canmount; do
2N/A if [[ "${dss.props[$prop].source}" == \
2N/A @(local|received) ]]; then
2N/A zfs_set $prop="${dss.props[$prop].value}" \
2N/A newdss || return 1
2N/A fi
2N/A done
2N/A done
2N/A
2N/A if (( xpool )); then
2N/A zfs create "${d.path.ds}/clonetmp" || return 1
2N/A /usr/sbin/zfs send -rc "${s.rpool_ds}@$snapname" |
2N/A /usr/sbin/zfs recv -Fu "${d.path.ds}/clonetmp/rpool"
2N/A if (( $? != 0 )); then
2N/A error "$e_ds_copy_failed" "${s.rpool_ds}@$snapname" \
2N/A "${d.path.ds}/clonetmp/rpool"
2N/A zfs destroy -rd "${s.rpool_ds}@$snapname" ||
2N/A log "$m_manual_snapshot_cleanup" \
2N/A "${s.rpool_ds}@$snapname"
2N/A zfs destroy -r "${d.path.ds}/clonetmp" &&
2N/A EXIT_CODE=$ZONE_SUBPROC_TRYAGAIN
2N/A return 1
2N/A fi
2N/A
2N/A rationalize_datasets d "${d.path.ds}/clonetmp/rpool"
2N/A if (( $? != 0 )); then
2N/A zfs destroy -rd "${s.rpool_ds}@$snapname" ||
2N/A log "$m_manual_snapshot_cleanup" \
2N/A "${s.rpool_ds}@$snapname"
2N/A zfs destroy -r "${d.path.ds}/clonetmp" &&
2N/A EXIT_CODE=$ZONE_SUBPROC_TRYAGAIN
2N/A return 1
2N/A fi
2N/A
2N/A zfs destroy "${d.path.ds}/clonetmp" || \
2N/A fatal "Failed to destroy temporary dataset."
2N/A
2N/A # Clean up snapshots on the destination.
2N/A zfs destroy -rd "${d.rpool_ds}@$snapname"
2N/A fi
2N/A
2N/A # Remount the source zone. Just complain if it can't be remounted
2N/A # as the next boot, clone, etc. will succeed even if it's not mounted.
2N/A mount_active_be -c s || log "$e_mount1_failed" "${s.name}"
2N/A
2N/A # Mount the new zone
2N/A [[ -d ${d.root} ]] || mkdir -m 755 "${d.root}"
2N/A mount_active_be -c d || return 1
2N/A
2N/A # Perform a deferred destruction of snapshots. Any snapshot that
2N/A # wasn't cloned will be immediately destroyed.
2N/A zfs destroy -rd "${s.rpool_ds}@$snapname" || return 1
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# init_dataset dss dataset_name
2N/A#
2N/A# Initializes a new dataset structure.
2N/A#
2N/A# Example:
2N/A# typeset dss
2N/A# init_dataset dss rpool
2N/A# print "${dss.name} mounted at ${dss.props[mountpoint].value}"
2N/A#
2N/A# After calling init_dataset, dss looks like
2N/A#
2N/A# dss.name=rpool
2N/A# dss.props[mountpoint].value=/rpool
2N/A# dss.props[mountpoint].source=local
2N/A# ...
2N/A#
2N/A# Returns 0 if one or more properties were found on the dataset, else 1.
2N/A#
2N/Afunction init_dataset {
2N/A typeset -n dss="$1"
2N/A dss="$2"
2N/A dss.name="$2"
2N/A dss.props=
2N/A typeset -A dss.props
2N/A
2N/A refresh_dataset dss
2N/A return $?
2N/A}
2N/A
2N/Afunction refresh_dataset {
2N/A typeset -n dss="$1"
2N/A typeset -r tab="$(printf "\t")"
2N/A typeset prop src val
2N/A typeset -i rv=1
2N/A
2N/A /usr/sbin/zfs get -Hp -o property,source,value all "${dss.name}" \
2N/A | while IFS=$tab read prop src val; do
2N/A dss.props[$prop].value="$val"
2N/A dss.props[$prop].source="$src"
2N/A (( rv=0 ))
2N/A done
2N/A (( rv == 0 )) || error "refresh of ${dss.name} failed"
2N/A return $rv
2N/A}
2N/A
2N/A#
2N/A# init_zfs_fs varname [path]
2N/A#
2N/A# Allocate a new zfs_fs structure
2N/A#
2N/Afunction init_zfs_fs {
2N/A typeset -n ref=$1
2N/A ref=
2N/A ref.ds=
2N/A
2N/A # When this variable is set to a value, cache the dataset
2N/A function ref.set {
2N/A get_ds_from_path "${.sh.value}" ${.sh.name}.ds
2N/A }
2N/A [[ -n "$2" ]] && ref="$2"
2N/A}
2N/A
2N/A#
2N/A# init_zone zone zonename [zonepath]
2N/A#
2N/A# Initialize a zone structure with the following useful members.
2N/A#
2N/A# brand The zone's brand.
2N/A# path The zonepath. See -p option below.
2N/A# path.ds The zonepath dataset name. Automatically updated when zonepath
2N/A# is updated if a dataset is mounted on the zonepath. This
2N/A# member should not be updated directly.
2N/A# root Read-only. The zoneroot. Automatically derived from zonepath.
2N/A# rpool_ds Read-only. The name of the dataset that contains the zone
2N/A# rpool. Automatically derived from path.ds
2N/A# ROOT_ds Read-only. The name of the dataset that contains boot
2N/A# environments. Automatically derived from path.ds
2N/A# new_be_datasets List of datasets that will be created when a new empty
2N/A# boot environment is created. For example, if each BE should
2N/A# get a separate /var, this list will contain one element: var.
2N/A#
2N/A# Other members are commonly initialized as needed by other functions. For
2N/A# example,
2N/A#
2N/A# active_ds The name of the dataset that should be mounted on the zone
2N/A# root. This is updated by brand-specific get_active_be() and
2N/A# set_active_be() functions.
2N/A# allowed_bes During attach, this associative array may be initialized
2N/A# to signal set_active_be() that it can only choose from these
2N/A# boot environments when deciding on which one to make active.
2N/A#
2N/A# Options and arguments:
2N/A#
2N/A# zone The name of the variable that will contain the structure.
2N/A# zonename The name of the zone.
2N/A# zonepath The zonepath. If this option is not provided, the value for
2N/A# zonepath will be looked up in the zone configuration, if it
2N/A# exists.
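2N/A#
2N/A# Example (illustrative; "myzone" and the zonepath are sample values, and
2N/A# the zonepath argument may be omitted to use the zone configuration):
2N/A#
2N/A#	typeset zone
2N/A#	init_zone zone myzone /zones/myzone
2N/A#	vlog "ZBEs for %s live under %s" "${zone.name}" "${zone.ROOT_ds}"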
2N/A#
2N/Afunction init_zone {
2N/A #
2N/A # Argument and option processing
2N/A #
2N/A typeset opt
2N/A
2N/A [[ -z "$1" ]] && fail_internal "$f_int_missing_arg" zone
2N/A [[ -z "$2" ]] && fail_internal "$f_int_missing_arg" zonename
2N/A typeset -n ref=$1
2N/A ref=$2
2N/A ref.name=$2
2N/A shift 2
2N/A
2N/A ref.path=
2N/A init_zfs_fs ref.path
2N/A
2N/A # Called after init_zfs_fs to make use of discipline function.
2N/A [[ -n $1 ]] && ref.path=$1
2N/A
2N/A #
2N/A # Set up remaining members
2N/A #
2N/A if [[ -z "${ref.path}" ]]; then
2N/A set -- $(zonecfg -z "$ref" info zonepath 2>/dev/null)
2N/A ref.path=$2
2N/A fi
2N/A set -- $(zonecfg -z "$ref" info brand 2>/dev/null)
2N/A ref.brand=$2
2N/A
2N/A # root is always zonepath/root
2N/A typeset -r ref.root=
2N/A function ref.root.get {
2N/A typeset -n pathref=${.sh.name%.root}.path
2N/A .sh.value="$pathref/root"
2N/A }
2N/A
2N/A # rpool dataset is always zonepath_ds/rpool
2N/A typeset -r ref.rpool_ds=
2N/A function ref.rpool_ds.get {
2N/A typeset -n pathdsref=${.sh.name%.rpool_ds}.path.ds
2N/A if [[ -z "$pathdsref" ]]; then
2N/A .sh.value=
2N/A else
2N/A .sh.value="$pathdsref/rpool"
2N/A fi
2N/A }
2N/A
2N/A # ROOT dataset is always zonepath_ds/rpool/ROOT
2N/A typeset -r ref.ROOT_ds=
2N/A function ref.ROOT_ds.get {
2N/A typeset -n pathdsref=${.sh.name%.ROOT_ds}.path.ds
2N/A if [[ -z "$pathdsref" ]]; then
2N/A .sh.value=
2N/A else
2N/A .sh.value="$pathdsref/rpool/ROOT"
2N/A fi
2N/A }
2N/A
2N/A # If a new empty BE is created, which datasets should be in it?
2N/A # This list may be overridden.
2N/A set -A ref.new_be_datasets var
2N/A}
2N/A
2N/A#
2N/A# bind_legacy_zone_globals zone
2N/A#
2N/A# Generates the commands to bind legacy globals to a specific zone's members.
2N/A# Output should be passed to eval.
2N/A#
2N/A# Example:
2N/A#
2N/A# typeset zone=
2N/A# init_zone zone z1
2N/A# eval $(bind_legacy_zone_globals zone)
2N/A#
2N/Afunction bind_legacy_zone_globals {
2N/A [[ -z "$1" ]] && fail_internal "$f_int_missing_arg" zone
2N/A cat <<-EOF
2N/A typeset -n ZONENAME="$1.name";
2N/A typeset -n ZONEPATH="$1.path";
2N/A typeset -n ZONEPATH_DS="$1.path.ds";
2N/A typeset -n ZONEROOT="$1.root";
2N/A typeset -n ACTIVE_DS="$1.active_ds";
2N/A EOF
2N/A}
2N/A
2N/A#
2N/A# a_push array_name item ...
2N/A#
2N/A# Push item(s) onto an indexed array.
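2N/A#
2N/A# Example (the array need not be pre-sized; items may contain spaces):
2N/A#
2N/A#	typeset -a list
2N/A#	a_push list "first" "second item with spaces"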
2N/A#
2N/Afunction a_push {
2N/A typeset -n array=$1
2N/A typeset -i len=${#array[@]}
2N/A shift;
2N/A typeset item
2N/A for item in "$@"; do
2N/A array[len++]="$item"
2N/A done
2N/A}
2N/A
2N/A#
2N/A# get_sorted_subscripts associative_array_name indexed_array_name
2N/A#
2N/A# The specification for ksh93 is silent about the order of ${!array[@]}.
2N/A# This function provides a guaranteed way to get the subscripts of an
2N/A# associative array in order.
2N/A#
2N/A# Example:
2N/A# typeset -A a_array
2N/A# typeset -a i_array
2N/A# a_array[foo/bar]=stuff
2N/A# a_array[foo]=otherstuff
2N/A# get_sorted_subscripts a_array i_array
2N/A# for subscript in "${i_array[@]}"; do
2N/A# print "a_array[$subscript] = ${a_array[$subscript]}"
2N/A# done
2N/A#
2N/Afunction get_sorted_subscripts {
2N/A typeset -n a="$1" i="$2"
2N/A
2N/A set -s -- "${!a[@]}"
2N/A set -A i "$@"
2N/A}
2N/A
2N/A#
2N/A# zfs_set property=value dss
2N/A#
2N/A# Sets the property, or generates a clear message that it can't.
2N/A#
2N/A# Arguments:
2N/A# property=value Passed directly to "zfs set". dss.props[property].* is
2N/A# updated.
2N/A# dss The name of a dataset structure, initialized with
2N/A# init_dataset.
2N/A#
2N/A# Example:
2N/A# typeset dss
2N/A# init_dataset dss "zones/z1/rpool"
2N/A# zfs_set zoned=on dss
2N/A#
2N/A# Returns 0 on success, else 1.
2N/A#
2N/Afunction zfs_set {
2N/A [[ -z "$1" ]] && fail_internal "$f_int_missing_arg" "prop=value"
2N/A [[ -z "$2" ]] && fail_internal "$f_int_missing_arg" "dataset"
2N/A typeset propval="$1"
2N/A typeset -n dss="$2" # dataset structure
2N/A
2N/A [[ -z "${dss.name}" ]] && fail_internal "uninitialized ds"
2N/A
2N/A vlog " setting ZFS property %s on %s" "$propval" "${dss.name}"
2N/A /usr/sbin/zfs set "$propval" "${dss.name}" || {
2N/A error "$e_zfs_set" "$propval" "${dss.name}"
2N/A return 1
2N/A }
2N/A
2N/A #
2N/A # Update the property on the dataset. Note that setting some
2N/A # properties (e.g. zoned) may cause others to change (e.g. mounted), so
2N/A	# this is imperfect.  It is best to use refresh_dataset when you really
2N/A	# care about getting an accurate snapshot of the properties.
2N/A	# refresh_dataset is not called here to avoid a lot of overhead when
2N/A # the caller has many properties to set.
2N/A #
2N/A	dss.props[${propval%%=*}].value="${propval#*=}"
2N/A	dss.props[${propval%%=*}].source=local
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# zfs [zfs(1M) args]
2N/A#
2N/A# On its own, zfs(1M) only tells you that something failed; it doesn't tell
2N/A# you anything about the options and arguments that were passed to it. This
2N/A# serves as a wrapper around zfs(1M) to be more verbose about what zfs(1M)
2N/A# failed to do.
2N/A#
2N/A# To avoid unnecessary error messages for the times that zfs failures are
2N/A# expected (e.g. part of a test condition for existence of a dataset), use
2N/A# "/usr/sbin/zfs" instead of "zfs".
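2N/A#
2N/A# Example (illustrative; "$dsn" stands in for any dataset name):
2N/A#
2N/A#	# Failure here is unexpected, so let the wrapper log it.
2N/A#	zfs set canmount=noauto "$dsn" || return 1
2N/A#	# Existence test where failure is normal; bypass the wrapper.
2N/A#	/usr/sbin/zfs list "$dsn" >/dev/null 2>&1 && print "exists"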
2N/A#
2N/Afunction zfs {
2N/A /usr/sbin/zfs "$@"
2N/A typeset -i rv=$?
2N/A (( rv == 0 )) || error "$e_cmd_failed" "zfs $*" $rv
2N/A return $rv
2N/A}
2N/A
2N/A#
2N/A# fix_ds_mountpoint dataset mountpoint
2N/A#
2N/A# Updates the dataset's mountpoint, zoned, and canmount properties so that the
2N/A# dataset is mountable in a zone. If the values in the dataset structure
2N/A# indicate that no changes are needed, no changes are made. If changes are
2N/A# made the dataset structure is refreshed to match the current state of the
2N/A# dataset according to zfs(1M).
2N/A#
2N/A# The dataset must not be mounted when this function is called.
2N/A#
2N/A# Arguments:
2N/A#	dataset		The name of a dataset structure, initialized with init_dataset.
2N/A# mountpoint The new value for the mountpoint property. For zoned datasets
2N/A# this should be relative to the zone root.
2N/A#
2N/A# Returns 0 on success, else 1.
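2N/A#
2N/A# Example (an illustrative sketch; the dataset name is a sample value and
2N/A# the dataset is assumed to be unmounted, as required above):
2N/A#
2N/A#	typeset dss
2N/A#	init_dataset dss "zones/z1/rpool/export" || return 1
2N/A#	fix_ds_mountpoint dss /export || return 1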
2N/A#
2N/Afunction fix_ds_mountpoint {
2N/A case $# in
2N/A 0|1) fail_internal "$f_int_missing_arg" "dataset or dir" ;;
2N/A 2) : ;;
2N/A *) fail_internal "$f_int_bad_arg" "$*" ;;
2N/A esac
2N/A typeset -n dss="$1"
2N/A typeset mountpoint="$2"
2N/A typeset -i dirty=0
2N/A
2N/A #
2N/A # If nothing needs to be fixed, don't fix it.
2N/A #
2N/A if [[ "${dss.props[mountpoint].value}" == "$mountpoint" && \
2N/A "${dss.props[zoned].value}" == on && \
2N/A "${dss.props[zoned].source}" == inherited* && \
2N/A "${dss.props[canmount].value}" == noauto ]]; then
2N/A #
2N/A # Currently we can only verify mountpoints if a dataset is not
2N/A # mounted. The lack of ability to get the persistent value of
2N/A # mountpoint from zfs(1M) is a bit of a problem:
2N/A #
2N/A # - If it is mounted with source of "temporary" we can't get
2N/A # to the persistent value to be sure that it will be mounted
2N/A # at the right place next time.
2N/A # - If the mounted property has a source of "-" and it is
2N/A # zoned, that means one of two things:
2N/A #
2N/A # i) The zone virtual platform must be up and it is mounted
2N/A # relative to the zone root. The mountpoint property
2N/A # that is seen from the global zone includes the
2N/A # zone root. This function doesn't expect to be called
2N/A # to fix a mountpoint in a running zone.
2N/A # ii) It is not mounted and the mountpoint can be trusted.
2N/A #
2N/A [[ "${dss.props[mounted].value}" == no ]] && return 0
2N/A
2N/A #
2N/A # It is mounted, making it impossible or unwise to muck with
2N/A # the mount point.
2N/A #
2N/A error "$e_no_mntpt_change_for_mounted" "${dss.name}"
2N/A return 1
2N/A fi
2N/A
2N/A #
2N/A # We can't fix the mountpoint on a mounted dataset without causing
2N/A # an unmount.
2N/A #
2N/A	if [[ "${dss.props[mounted].value}" == yes ]]; then
2N/A error "$e_no_mntpt_change_for_mounted" "${dss.name}"
2N/A return 1
2N/A fi
2N/A
2N/A vlog "$m_fix_ds_mountpoint" "${dss.name}" \
2N/A "${dss.props[mountpoint].value}" "$mountpoint"
2N/A
2N/A # Fix the zoned property if it is not inherited.
2N/A if [[ "${dss.props[zoned].source}" != inherited* ]]; then
2N/A if [[ ${dss.props[zoned].value} != on ]]; then
2N/A vlog "Inheriting property zoned on ${dss.name}"
2N/A zfs inherit zoned "${dss.name}" || return 1
2N/A fi
2N/A (( dirty=1 ))
2N/A fi
2N/A
2N/A #
2N/A # Verify that the value is now zoned. If the parent dataset wasn't
2N/A # zoned then this dataset is not zoned and a basic assumption of the
2N/A # zone dataset structure is broken. Note that we aren't using the
2N/A # cached value in $dss because the zoned property may have changed
2N/A # above.
2N/A #
2N/A typeset zonedval
2N/A zonedval=$(zfs get -H -o value zoned "${dss.name}") || return 1
2N/A if [[ $zonedval != on ]]; then
2N/A error "$e_parent_not_zoned" "${dss.name}"
2N/A return 1
2N/A fi
2N/A # All BE datasets should have canmount=noauto
2N/A typeset cm="${dss.props[canmount].value}"
2N/A if [[ "$cm" != noauto ]]; then
2N/A zfs_set canmount=noauto dss || return 1
2N/A (( dirty=1 ))
2N/A fi
2N/A
2N/A #
2N/A # Now that we are sure that mucking with the mountpoint won't cause
2N/A # mounts in the global zone, update the mountpoint property.
2N/A #
2N/A if [[ "${dss.props[mountpoint].value}" != "$mountpoint" ]]; then
2N/A zfs_set mountpoint="$mountpoint" dss || return 1
2N/A (( dirty=1 ))
2N/A fi
2N/A
2N/A if (( dirty )); then
2N/A refresh_dataset dss || return 1
2N/A fi
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# zfs_tmpmount dataset mountpoint
2N/A#
2N/A# Mount the specified dataset using a ZFS temporary mount. The mountpoint
2N/A# is created if necessary. The zfs mountpoint property must not be "legacy"
2N/A# or "none" and the canmount property must not be "off" for this to succeed.
2N/A#
2N/A# Special protection against device files is needed for datasets mounted by
2N/A# the global zone that are delegated to non-global zones. The temporary
2N/A# mount option "nodevices" overrides the "devices" zfs property. This
2N/A# provides protection that wouldn't be afforded by simply setting the zfs
2N/A# "devices" property to "off". This is not a concern for datasets that are
2N/A# mounted from within the zone because the zone=<zonename> property implies
2N/A# that device special files are disallowed.
2N/A#
2N/A# Arguments:
2N/A# dataset The name of a dataset. This may be a string or a dataset
2N/A# structure initialized with init_dataset.
2N/A# mountpoint The place where it gets mounted.
2N/A#
2N/A# Returns 0 on success, else 1.
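2N/A#
2N/A# Example (illustrative; mirrors the use made by migrate_rpool() below):
2N/A#
2N/A#	dir=$(mktemp -d)
2N/A#	zfs_tmpmount "${zone.rpool_ds}" "$dir/rpool" || return 1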
2N/A#
2N/Afunction zfs_tmpmount {
2N/A typeset dsname="$1"
2N/A typeset dir="$2"
2N/A
2N/A vlog "Mounting $dsname at $dir with ZFS temporary mount"
2N/A [[ -d "$dir" ]] || mkdir -m 755 -p "$dir"
2N/A zfs mount -o nodevices,mountpoint="$dir" "$dsname" || {
2N/A error "$e_temp_mount_failed" "$dsname" "$dir"
2N/A return 1
2N/A }
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# mount_be_ds -r root [-m mountpoint] dataset_structure_name
2N/A#
2N/A# Uses the ZFS Temporary Mount feature to mount the specified dataset.
2N/A#
2N/A# -m mountpoint The place where the dataset will be mounted,
2N/A#				relative to the root option.  If this value and
2N/A# the mountpoint property in the dataset are
2N/A# in conflict, the dataset will be modified to
2N/A# have its mountpoint set to this value.
2N/A#	-r root			The root of the zone.  This plus mountpoint
2N/A# (or mountpoint property on dataset) determines
2N/A# where the mount will occur. Required.
2N/A# dataset_structure_name The name of a structure initialized with
2N/A# init_dataset. Before any action is taken,
2N/A# the properties on this dataset will be
2N/A# refreshed to ensure they match the current
2N/A# state of the system.
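2N/A#
2N/A# Example (an illustrative sketch; assumes a dataset that belongs to the
2N/A# currently active BE of a zone structure initialized with init_zone):
2N/A#
2N/A#	typeset dss
2N/A#	init_dataset dss "${zone.active_ds}/var" || return 1
2N/A#	mount_be_ds -r "${zone.root}" -m /var dss || return 1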
2N/A#
2N/Afunction mount_be_ds {
2N/A typeset root= mntpt= opt=
2N/A
2N/A #
2N/A # Argument processing
2N/A #
2N/A while getopts :m:r: opt; do
2N/A case "$opt" in
2N/A m) mntpt=$OPTARG ;;
2N/A r) root=$OPTARG ;;
2N/A ?) fail_internal "$f_int_bad_opt" "$OPTARG" ;;
2N/A esac
2N/A done
2N/A shift $(( OPTIND - 1 ))
2N/A [[ -z "$root" ]] && fail_internal "$f_int_missing_opt" r
2N/A [[ -z "$1" ]] && fail_internal "$f_int_missing_arg" dataset
2N/A typeset -n dss=$1
2N/A shift
2N/A (( $# == 0 )) || fail_internal "$f_int_bad_arg" "$*"
2N/A
2N/A vlog "Preparing to mount %s at %s%s" "${dss.name}" "${root}" "${mntpt}"
2N/A
2N/A #
2N/A # Real work
2N/A #
2N/A
2N/A # Verify that all the properties are OK prior to mounting
2N/A refresh_dataset dss || return 1
2N/A
2N/A #
2N/A # Temporary mounts hide the persistent value of the mountpoint
2N/A # property. As such, assume that if it is mounted somewhere under
2N/A # $root, all is well.
2N/A #
2N/A if [[ "${dss.props[mountpoint].source}" == temporary ]]; then
2N/A if [[ -z "$mntpt" \
2N/A		    && "${dss.props[mountpoint].value}" == "$root"/* ]]; then
2N/A return 0
2N/A fi
2N/A
2N/A # Ask zfs for an exact match
2N/A [[ "$(get_ds_from_path "${root}${mntpt}")" \
2N/A == "${dss.name}" ]] && return 0
2N/A fi
2N/A
2N/A #
2N/A # Fix up the mountpoint, zoned, and canmount properties
2N/A #
2N/A if [[ -z "$mntpt" ]]; then
2N/A mntpt="${dss.props[mountpoint].value}"
2N/A fi
2N/A fix_ds_mountpoint dss "$mntpt" || {
2N/A error "$e_mount1_failed" "${dss.name}"
2N/A return 1
2N/A }
2N/A
2N/A # Use zfs(1M) to mount it.
2N/A zfs_tmpmount "${dss.name}" "${root}${mntpt}" || return 1
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# mount_be [-c] root_dataset mountpoint
2N/A#
2N/A# Mounts the specified boot environment at the specified mountpoint.
2N/A# In addition to mounting the root dataset, child datasets that have
2N/A# canmount=noauto and a path as a mountpoint are mounted.
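2N/A#
2N/A# Example (illustrative; -c also mounts the child datasets, as done by
2N/A# mount_active_be() below):
2N/A#
2N/A#	mount_be -c "${zone.active_ds}" "${zone.root}" || return 1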
2N/A#
2N/Afunction mount_be {
2N/A typeset -A dsa
2N/A typeset -A mnt
2N/A typeset line
2N/A typeset -i dscnt=0
2N/A typeset dir
2N/A typeset -i mount_children=0
2N/A typeset zfslist_r # -r for zfs list, if needed
2N/A typeset extra_vlog=
2N/A
2N/A while getopts :c opt; do
2N/A case $opt in
2N/A c) (( mount_children=1 ))
2N/A zfslist_r=-r
2N/A extra_vlog=" (including child datasets)"
2N/A ;;
2N/A ?) fail_internal "$f_int_bad_opt" "$OPTARG"
2N/A ;;
2N/A esac
2N/A done
2N/A shift $(( OPTIND - 1 ))
2N/A typeset rootds="$1" root="$2"
2N/A
2N/A vlog "Mounting boot environment in $rootds at ${root}${extra_vlog}"
2N/A
2N/A #
2N/A # Find all of the datasets under the root dataset, store them in the dsa
2N/A # associative array. stderr and return value from zfs list command are
2N/A # ignored. Instead, there is a check to be sure that the root dataset
2N/A # was added in the body of the while loop.
2N/A #
2N/A	/usr/sbin/zfs list -H -o name -t filesystem $zfslist_r "$rootds" 2>/dev/null \
2N/A | while read line; do
2N/A dsa["$line"].dss=
2N/A init_dataset "dsa[$line].dss" "$line"
2N/A
2N/A # We know where rootds needs to be mounted, so skip checks.
2N/A [[ $line == "$rootds" ]] && continue
2N/A
2N/A #
2N/A # Be sure mountpoint and canmount are OK. Informational
2N/A # messages are given rather than errors to align with
2N/A # behavior in beadm.
2N/A #
2N/A typeset dir="${dsa[$line].dss.props[mountpoint].value}"
2N/A typeset cm="${dsa[$line].dss.props[canmount].value}"
2N/A
2N/A if [[ "$dir" == legacy || "$dir" == none ]]; then
2N/A			log "$m_not_mounting_mountpoint" "$line" "$dir"
2N/A unset dsa[$line]
2N/A continue
2N/A fi
2N/A # If canmount=on it will be set to noauto when it is mounted.
2N/A if [[ $cm == off ]]; then
2N/A			log "$m_not_mounting_canmount" "$line" "$cm"
2N/A unset dsa[$line]
2N/A continue
2N/A fi
2N/A done
2N/A
2N/A #
2N/A # Be sure the root dataset was found
2N/A #
2N/A if [[ ${dsa[$rootds].dss.name} != "$rootds" ]]; then
2N/A error "$e_no_such_dataset" "$rootds"
2N/A return 1
2N/A fi
2N/A
2N/A #
2N/A # In most circumstances, only the root gets mounted. However, if the
2N/A # zone is intended to be left in the installed state, such as with
2N/A # sysboot or halt, the entire BE is mounted.
2N/A #
2N/A if (( mount_children == 0 )); then
2N/A if [[ ${dsa[$rootds].dss.props[mountpoint].value} != "$root" \
2N/A || ${dsa[$rootds].dss.props[mounted].value} != yes ]]; then
2N/A mount_be_ds -r "$root" -m / dsa[$rootds].dss || return 1
2N/A else
2N/A vlog "${.sh.fun} $rootds already on $root"
2N/A fi
2N/A return 0
2N/A fi
2N/A
2N/A #
2N/A # Mount the file systems.
2N/A #
2N/A	typeset -a umount_on_error subs
2N/A	typeset -i i errors=0
2N/A get_sorted_subscripts dsa subs
2N/A for dir in "${subs[@]}"; do
2N/A mount_be_ds -r "$root" "dsa[$dir].dss" || {
2N/A (( errors++ ))
2N/A break
2N/A }
2N/A a_push umount_on_error "$dir"
2N/A done
2N/A
2N/A # If no errors, we are done.
2N/A if (( errors == 0 )); then
2N/A return 0
2N/A fi
2N/A
2N/A # The mount process was not error-free. Unmount whatever was mounted.
2N/A for (( i = ${#umount_on_error[@]} - 1 ; i >= 0 ; i-- )); do
2N/A zfs unmount "${umount_on_error[i]}" \
2N/A		    || error "$e_unmount_failed" "${umount_on_error[i]}"
2N/A done
2N/A return 1
2N/A}
2N/A
2N/A#
2N/A# mount_active_be [-b bootenv] [-c | -C] zoneref
2N/A#
2N/A# Mounts the active boot environment at the zoneroot.
2N/A#
2N/A# Arguments and Options:
2N/A#
2N/A# -b bootenv Set this boot environment as the active boot environment
2N/A# before mounting.
2N/A# -c Mount the complete dataset, including children of the root
2N/A# dataset. If the wrong BE is currently mounted, it will be
2N/A# unmounted first.
2N/A# -C Similar to -c, but the BE that is already partially or fully
2N/A# mounted is assumed to be the correct one. No attempt will
2N/A# be made to unmount the mounted zone root. This should be used
2N/A# when we know that the right BE was already partially mounted
2N/A# and we just need the child datasets to be mounted too.
2N/A# zoneref A zone structure initialized by init_zone
2N/A#
2N/A# Returns 0 on success, else 1
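2N/A#
2N/A# Example (illustrative; "zbe-1" is a sample boot environment name):
2N/A#
2N/A#	# Mount just the root of the currently active BE.
2N/A#	mount_active_be zone || return 1
2N/A#	# Make zbe-1 active and mount it along with its child datasets.
2N/A#	mount_active_be -b zbe-1 -c zone || return 1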
2N/A#
2N/Afunction mount_active_be {
2N/A typeset mount_children= unmount_children= be= opt
2N/A
2N/A while getopts :b:cC opt; do
2N/A case $opt in
2N/A b) be=$OPTARG ;;
2N/A c) mount_children=-c ;;
2N/A C) mount_children=-c
2N/A unmount_children=-C
2N/A ;;
2N/A ?) fail_internal "$f_int_bad_opt" "$OPTARG" ;;
2N/A esac
2N/A done
2N/A shift $(( OPTIND - 1 ))
2N/A typeset -n zone="$1"
2N/A
2N/A if [[ -n $be ]]; then
2N/A set_active_be zone "$be" || return 1
2N/A elif [[ -z "${zone.active_ds}" ]]; then
2N/A get_active_be zone || return 1
2N/A fi
2N/A
2N/A #
2N/A # The unmount is required until such a time as mount_be is able to
2N/A # get the persistent mountpoint property out of zfs(1M).
2N/A #
2N/A unmount_be $unmount_children zone || return 1
2N/A mount_be $mount_children "${zone.active_ds}" \
2N/A "${zone.root}" || return 1
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# unmount_be [-C] zoneref
2N/A#
2N/A# Unmounts the zone mounted at zoneref.root. This is expected to be called
2N/A# at a time when the zone has no active virtual platform. As such, it should
2N/A# only have local mounts. In the case of a zone being halted, this function
2N/A# should have no work to do. During sysboot, attach, and clone, this function
2N/A# is likely to unmount all datasets in a BE.
2N/A#
2N/A# Options and Arguments:
2N/A#
2N/A# -C Only unmount ZFS datasets that are children of the dataset
2N/A# mounted on the zone root.
2N/A# zoneref A zone structure initialized by init_zone
2N/A#
2N/A# Returns 0 if everything under the zoneroot was unmounted, else 1
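2N/A#
2N/A# Example (illustrative; mirrors the use made by detach_zone() below):
2N/A#
2N/A#	unmount_be zone || fail_tryagain "$f_unmount_be"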
2N/A#
2N/Afunction unmount_be {
2N/A typeset -a mounts
2N/A typeset tab=$(printf "\t")
2N/A typeset dev dir fstype junk opt
2N/A typeset -i zfs_children_only=0
2N/A
2N/A while getopts :C opt; do
2N/A case $opt in
2N/A C) (( zfs_children_only=1 )) ;;
2N/A		?) fail_internal "$f_int_bad_opt" "$OPTARG" ;;
2N/A esac
2N/A done
2N/A shift $(( OPTIND - 1 ))
2N/A
2N/A typeset -n zone=$1
2N/A typeset root=${zone.root}
2N/A
2N/A [[ -z "$root" ]] && fail_internal "zoneroot is null"
2N/A
2N/A if (( zfs_children_only )) && [[ -z "${zone.active_ds}" ]]; then
2N/A get_active_be zone || return 1
2N/A fi
2N/A
2N/A # Read /etc/mnttab
2N/A while IFS=$tab read dev dir fstype junk; do
2N/A if (( zfs_children_only )); then
2N/A [[ $fstype != zfs ]] && continue
2N/A [[ $dir == "$root" ]] && continue
2N/A # Do not umount added fs resources
2N/A [[ $dev == ${zone.active_ds}/* ]] || continue
2N/A fi
2N/A if [[ "$dir" == "$root" ]]; then
2N/A a_push mounts "$dir"
2N/A continue
2N/A fi
2N/A if [[ "$dir" == "$root"/* ]]; then
2N/A a_push mounts "$dir"
2N/A continue
2N/A fi
2N/A done < /etc/mnttab
2N/A
2N/A (( ${#mounts[@]} == 0 )) && return 0
2N/A
2N/A # Sort
2N/A set -s -- "${mounts[@]}"
2N/A set -A mounts "$@"
2N/A
2N/A # Unmount in reverse sorted order
2N/A typeset -i i rv=0
2N/A for (( i = ${#mounts[@]} - 1; i >= 0; i-- )); do
2N/A vlog "Unmounting ${mounts[i]}"
2N/A #
2N/A # If a graceful umount fails, it may be an indication that some
2N/A # global zone process is still active on the file system's
2N/A # contents. We should allow the umount to fail so that we
2N/A # don't pull the rug out from a process that may be doing
2N/A # delicate operations in the zone's BE.
2N/A #
2N/A umount "${mounts[i]}" || {
2N/A rv=1
2N/A error "$e_unmount_failed" "${mounts[i]}"
2N/A }
2N/A done
2N/A
2N/A return $rv
2N/A}
2N/A
2N/A#
2N/A# detach_zone zone
2N/A#
2N/A# Unmount the ZBE then copy the zone configuration to SUNWdetached.xml.
2N/A#
2N/A# Arguments:
2N/A#
2N/A# zone A zone structure initialized with init_zone
2N/A#
2N/A# Returns:
2N/A# 0 on success, exits with $ZONE_SUBPROC_TRYAGAIN on error
2N/A#
2N/A
2N/Afunction detach_zone {
2N/A typeset -n zone=$1
2N/A
2N/A unmount_be zone || fail_tryagain "$f_unmount_be"
2N/A
2N/A cp /etc/zones/${zone.name}.xml ${zone.path}/SUNWdetached.xml
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# Determines if any part of the zone's /export hierarchy comes from
2N/A# fs or dataset resources in the zone configuration.
2N/A#
2N/A# Returns 0 (true) if export is provided by the zone configuration.
2N/A#
2N/Afunction zonecfg_has_export {
2N/A typeset -n zone=$1
2N/A typeset dir
2N/A for dir in $(zonecfg -z "${zone.name}" info fs | \
2N/A nawk '$1 == "dir:" { print $2 }' | LC_ALL=C sort); do
2N/A if [[ $dir == /export || $dir == /export/* ]]; then
2N/A log "$m_manual_export_migrate" "${zone.root}/export" \
2N/A "zonecfg fs $dir"
2N/A return 0
2N/A fi
2N/A done
2N/A typeset dsname line
2N/A zonecfg -z "${zone.name}" info dataset | \
2N/A nawk '$1 == "name:" { print $2}' | \
2N/A while read line; do
2N/A zfs list -H -o name,mountpoint "$line" 2>/dev/null
2N/A done | while read dsname dir; do
2N/A if [[ $dir == /export || $dir == /export/* ]]; then
2N/A log "$m_manual_export_migrate" "${zone.root}/export" \
2N/A "zonecfg dataset $dsname"
2N/A return 0
2N/A fi
2N/A done
2N/A
2N/A return 1
2N/A}
2N/A
2N/A#
2N/A# umount_destroy_rmdir dirname dsname
2N/A#
2N/A# Cleans up the specified mount, destroys the dataset, and removes the mount
2N/A# point. Calls error() with a useful message on first failure and returns 1.
2N/A# Returns 0 on success.
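2N/A#
2N/A# Example (illustrative; mirrors the use made by migrate_export() below):
2N/A#
2N/A#	umount_destroy_rmdir "$dir/export" "${zone.rpool_ds}/export" || {
2N/A#		error "$e_export_migration_failed"
2N/A#		return 1
2N/A#	}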
2N/A#
2N/Afunction umount_destroy_rmdir {
2N/A typeset dir=$1
2N/A typeset dsname=$2
2N/A
2N/A umount "$dir" || {
2N/A error "$e_unmount_failed" "$dir"
2N/A return 1
2N/A }
2N/A zfs destroy "$dsname" || {
2N/A error "$e_zfs_destroy" "$dsname"
2N/A return 1
2N/A }
2N/A rmdir "$dir" || {
2N/A error "$f_rmdir" "$dir"
2N/A return 1
2N/A }
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# During p2v and v2v, move any contents from the BE's /export to the non-BE
2N/A# /export. Same goes for /export/home.
2N/A#
2N/A# If existing contents are found in .../rpool/export or .../rpool/export/home,
2N/A# a message is displayed indicating manual migration is required and the
2N/A# return value is 0. If migration is attempted but unsuccessful, the return
2N/A# value is 1. If migration is successful, the return value is 0.
2N/A#
2N/A# In the event that the zone's existing /export and/or /export/home exists but is
2N/A# not a directory (e.g. is a symlink) the corresponding dataset(s) are destroyed
2N/A# so as to not be in the way for any migration. If /export exists and is not
2N/A# a directory, both the export and export/home datasets are destroyed and no
2N/A# migration takes place.
2N/A#
2N/Afunction migrate_export {
2N/A typeset -n zone=$1
2N/A typeset dir
2N/A typeset -i destroy_export=0 destroy_exporthome=0
2N/A
2N/A # If /export doesn't exist or is empty there is no work to do.
2N/A [[ -d ${zone.root}/export ]] || return 0
2N/A [[ -z "$(ls -A ${zone.root}/export)" ]] && return 0
2N/A
2N/A #
2N/A # If zonecfg fs or dataset resources specify a file system to mount
2N/A # anywhere under /export, assume that they don't want /export migrated.
2N/A #
2N/A zonecfg_has_export zone && return 0
2N/A
2N/A #
2N/A	# Mount /export and /export/home under a temporary directory.
2N/A	# Note that the zone's export dataset is mounted at $dir/export,
2N/A	# not at $dir, so that mv(1) can be used for a very simple
2N/A # migration process.
2N/A #
2N/A dir=$(mktemp -d)
2N/A zfs_tmpmount "${zone.rpool_ds}/export" "$dir/export" || {
2N/A rmdir $dir
2N/A error "$e_export_migration_failed"
2N/A return 1
2N/A }
2N/A zfs_tmpmount "${zone.rpool_ds}/export/home" "$dir/export/home" || {
2N/A umount "$dir/export"
2N/A rmdir "$dir/export" "$dir"
2N/A error "$e_export_migration_failed"
2N/A return 1
2N/A }
2N/A
2N/A #
2N/A # Check to see if the existing .../rpool/export dataset hierarchy
2N/A # contains anything. If so, don't clobber it.
2N/A #
2N/A (cd "$dir" && find export) | \
2N/A nawk '$0 !~ /^(export|export\/home)$/ {exit 1}' || {
2N/A umount "$dir/export/home"
2N/A umount "$dir/export"
2N/A rmdir "$dir/export" 2>/dev/null
2N/A rmdir "$dir"
2N/A log "$m_manual_export_migrate" "${zone.root}/export" \
2N/A "${zone.rpool_ds}/export"
2N/A return 0
2N/A }
2N/A
2N/A #
2N/A	# It is possible that /export and/or /export/home exists but is not a
2N/A # directory. If so, the corresponding dataset should be deleted so
2N/A # that migration (if any) doesn't choke trying to put a directory on
2N/A # top of a symlink or other non-directory.
2N/A #
2N/A if [[ -h "${zone.root}/export/home" ]]; then
2N/A (( destroy_exporthome = 1 ))
2N/A elif [[ -e "${zone.root}/export/home" && \
2N/A ! -d "${zone.root}/export/home" ]]; then
2N/A (( destroy_exporthome = 1 ))
2N/A fi
2N/A
2N/A if [[ -h "${zone.root}/export" ]]; then
2N/A (( destroy_export = 1 ))
2N/A (( destroy_exporthome = 1 ))
2N/A elif [[ -e "${zone.root}/export" && ! -d "${zone.root}/export" ]]
2N/A then
2N/A (( destroy_export = 1 ))
2N/A (( destroy_exporthome = 1 ))
2N/A fi
2N/A
2N/A if (( destroy_exporthome )); then
2N/A umount_destroy_rmdir "$dir/export/home" \
2N/A "${zone.rpool_ds}/export/home" || {
2N/A error "$e_export_migration_failed"
2N/A return 1
2N/A }
2N/A fi
2N/A
2N/A if (( destroy_export )); then
2N/A umount_destroy_rmdir "$dir/export" \
2N/A "${zone.rpool_ds}/export" || {
2N/A error "$e_export_migration_failed"
2N/A return 1
2N/A }
2N/A # Nothing left to migrate to. Finish cleanup and return.
2N/A rmdir $dir
2N/A return 0
2N/A fi
2N/A
2N/A # Odd quoting below to prevent SCCS keyword expansion & warnings.
2N/A typeset bkup
2N/A bkup=${zone.root}/export.backup.$(TZ=UTC date +%Y""%m""%dT""%H""%M""%SZ)
2N/A if [[ -e "$bkup" ]]; then
2N/A #
2N/A # There's no legitimate reason that we should have a
2N/A # collision - this is likely an attack by the provider
2N/A # of the archive.
2N/A #
2N/A umount "$dir/export/home"
2N/A umount "$dir/export"
2N/A rmdir "$dir/export" 2>/dev/null
2N/A rmdir "$dir"
2N/A fatal "$f_backup_dir_exists" "$bkup"
2N/A fi
2N/A
2N/A log "$m_migrating_data" "$(zfs list -H -o name "${zone.root}/export")" \
2N/A "$(zfs list -H -o name "$dir/export")"
2N/A
2N/A #
2N/A # cpio insists on printing the number of blocks transferred on stderr.
2N/A # This output only serves to confuse so prevent it from being displayed
2N/A # if cpio is otherwise successful.
2N/A #
2N/A typeset cpioout rv
2N/A cpioout=$( cd "${zone.root}" && find export | \
2N/A LC_ALL=C cpio -pdumP@/ "$dir" 2>&1)
2N/A rv=$?
2N/A if [[ $cpioout != [0-9]*" blocks" || $rv != 0 ]]; then
2N/A print -- "$cpioout"
2N/A (( destroy_exporthome )) || umount "$dir/export/home"
2N/A umount "$dir/export"
2N/A rmdir "$dir/export" 2>/dev/null
2N/A rmdir "$dir"
2N/A error "$e_export_migration_failed"
2N/A return 1
2N/A fi
2N/A
2N/A mv ${zone.root}/export "$bkup"
2N/A log "$m_backup_saved" /export "/$(basename "$bkup")"
2N/A
2N/A # Migration was successful. Even if a umount fails, still return 0.
2N/A (( destroy_exporthome )) || umount "$dir/export/home"
2N/A umount "$dir/export"
2N/A rmdir "$dir/export" 2>/dev/null
2N/A rmdir "$dir"
2N/A
2N/A return 0
2N/A}
2N/A
2N/A#
2N/A# migrate_rpool zone
2N/A#
2N/A# Migrates the contents of the /rpool directory out of the ZBE into the rpool
2N/A# dataset.
2N/A#
2N/A# Arguments
2N/A#
2N/A# zone A zone data structure initialized with init_zone.
2N/A#
2N/A# Returns
2N/A#
2N/A# 0 on success, 1 on failure.
2N/A#
2N/Afunction migrate_rpool {
2N/A typeset -n zone=$1
2N/A typeset dir
2N/A
2N/A [[ -d ${zone.root}/rpool ]] || return 0
2N/A [[ -z "$(ls -A ${zone.root}/rpool)" ]] && return 0
2N/A
2N/A dir=$(mktemp -d)
2N/A zfs_tmpmount "${zone.rpool_ds}" "$dir/rpool" || {
2N/A rmdir $dir/rpool >/dev/null 2>&1
2N/A rmdir $dir
2N/A error "$e_rpool_migration_failed"
2N/A return 1
2N/A }
2N/A
2N/A typeset bkup
2N/A bkup=${zone.root}/rpool.backup.$(TZ=UTC date +%Y""%m""%dT""%H""%M""%SZ)
2N/A if [[ -e $bkup ]]; then
2N/A #
2N/A # There's no legitimate reason that we should have a
2N/A # collision - this is likely an attack by the provider
2N/A # of the archive.
2N/A #
2N/A umount $dir/rpool
2N/A rmdir $dir/rpool
2N/A rmdir $dir
2N/A fatal "$f_backup_dir_exists" "$bkup"
2N/A fi
2N/A
2N/A log "$m_migrating_data" "$(zfs list -H -o name "${zone.root}/rpool")" \
2N/A "$(zfs list -H -o name "$dir/rpool")"
2N/A
2N/A #
2N/A # cpio insists on printing the number of blocks transferred on stderr.
2N/A # This output only serves to confuse so prevent it from being displayed
2N/A # if cpio is otherwise successful.
2N/A #
2N/A typeset cpioout rv
2N/A cpioout=$( cd "${zone.root}" && find rpool | \
2N/A LC_ALL=C cpio -pdumP@/ "$dir" 2>&1)
2N/A rv=$?
2N/A if [[ $cpioout != [0-9]*" blocks" || $rv != 0 ]]; then
2N/A print -- "$cpioout"
2N/A umount "$dir/rpool"
2N/A rmdir "$dir/rpool"
2N/A rmdir "$dir"
2N/A error "$e_rpool_migration_failed"
2N/A return 1
2N/A fi
2N/A
2N/A mv ${zone.root}/rpool "$bkup"
2N/A log "$m_backup_saved" /rpool "/$(basename "$bkup")"
2N/A
2N/A # Migration was successful. Even if a umount fails, still return 0.
2N/A umount "$dir/rpool"
2N/A rmdir "$dir/rpool"
2N/A rmdir "$dir"
2N/A return 0
2N/A}
2N/A
2N/ACLUSTER_HOOK="/usr/cluster/lib/sc/zc_handler"
2N/A
2N/Afunction call_cluster_hook {
2N/A if [[ -f $CLUSTER_HOOK ]]; then
2N/A $CLUSTER_HOOK "$@"
2N/A return $?
2N/A else
2N/A return $ZONE_SUBPROC_OK
2N/A fi
2N/A}
2N/A
2N/A#
2N/A# start_log zone subcommand [command line]
2N/A#
2N/A# Sets up the environment for logging functions to log to a log file. The log
2N/A# is created as /var/log/zones/zoneadm.<timestamp>.<zone>.<subcommand>.
2N/A# However, see the effect of the ZONEADM_LOGFILE environment variable.
2N/A#
2N/A# Example:
2N/A#
2N/A# init_zone zone "$1" "$2"
2N/A# start_log zone attach "$0" "$@"
2N/A#
2N/A# Arguments
2N/A#
2N/A# zone A zone structure initialized with init_zone.
2N/A# subcommand The subcommand of zoneadm that is running.
2N/A# command line The command line of the script calling this function. This
2N/A# array will be logged to indicate what is running
2N/A#
2N/A# Globals
2N/A#
2N/A# LOGFILE Set to the name of the log file.
2N/A# ZONEADM_LOGFILE If this environment variable is set to a writable file,
2N/A# logging is done to that file. If this environment
2N/A# variable did not already refer to a writable file,
2N/A# it is set to the value of LOGFILE. Thus, in situations
2N/A# where one brand script calls another (perhaps via
2N/A# another invocation of zoneadm), there is a single
2N/A# log file created.
2N/A#	LOGGING_COMMAND	A string that represents the command line.  Set by
2N/A#			start_log() and used by finish_log().
2N/A# FINISH_LOG Set to true or false. If ZONEADM_LOGFILE is set
2N/A# to a writable file when start_log() is called,
2N/A# FINISH_LOG is set to false. This affects behavior
2N/A# of finish_log().
2N/A# stderr (fds 2 & 3) stderr is copied to file descriptor 3, then stderr
2N/A# is redirected for append to $LOGFILE.
2N/A#
2N/Afunction start_log {
2N/A typeset subcommand zonename
2N/A
2N/A (( $# < 2 )) && fail_internal "Too few arguments to start_log"
2N/A typeset -n zone=$1
2N/A typeset subcommand=$2
2N/A typeset zonename=${zone.name}
2N/A shift 2
2N/A LOGGING_COMMAND="$*"
2N/A
2N/A [[ -z $zonename ]] && fail_internal "zone structure not initialized"
2N/A
2N/A if [[ -n $ZONEADM_LOGFILE && -f $ZONEADM_LOGFILE &&
2N/A -w $ZONEADM_LOGFILE ]]; then
2N/A #
2N/A # Some other script that called this one already set things
2N/A # up. Continue to use existing $ZONEADM_LOGFILE as $LOGFILE.
2N/A #
2N/A FINISH_LOG=false
2N/A LOGFILE=$ZONEADM_LOGFILE
2N/A else
2N/A if [[ ! -d /var/log/zones ]]; then
2N/A mkdir -m 755 /var/log/zones ||
2N/A fatal "$f_mkdir" /var/log/zones
2N/A fi
2N/A FINISH_LOG=true
2N/A
2N/A #
2N/A # Use a subshell to set noclobber, then try to create
2N/A # a unique log file without the ugly file name generated
2N/A # by mktemp.
2N/A #
2N/A typeset name timestamp
2N/A timestamp=$(TZ=GMT date +"%Y""%m""%dT""%H""%M""%"SZ)
2N/A name=/var/log/zones/zoneadm.$timestamp.$zonename.$subcommand
2N/A LOGFILE=$(set -o noclobber
2N/A try=$name
2N/A i=0
2N/A while (( i++ < 100 )); do
2N/A exec 2>$try
2N/A if (( $? == 0 )); then
2N/A print "$try"
2N/A break
2N/A fi
2N/A try=$name.$try
2N/A done)
2N/A [[ -z $LOGFILE || ! -f $LOGFILE ]] &&
2N/A fail_internal "Cannot create unique log file"
2N/A ZONEADM_LOGFILE=$LOGFILE
2N/A fi
2N/A
2N/A #
2N/A # Before redirecting stderr to $LOGFILE, save a copy of it to fd 3
2N/A # so that it can be restored by finish_log().
2N/A #
2N/A exec 3>&2
2N/A exec 2>>$LOGFILE
2N/A
2N/A vlog "==== Starting: %s ====" "$*"
2N/A [[ $FINISH_LOG == true ]] && log "$m_log_progress_to" "$LOGFILE"
2N/A}
2N/A#
2N/A# The following export is performed in the global scope to force the
2N/A# environment variable to exist. If it is exported from within the function,
2N/A# it just puts it into the global scope and not into the environment.
2N/A#
2N/Aexport ZONEADM_LOGFILE
2N/A
2N/A#
2N/A# finish_log zone [logfilevar]
2N/A#
2N/A# Finish logging started by start_log().
2N/A#
2N/A# Arguments
2N/A#
2N/A# zone A zone structure initialized with init_zone.
2N/A# logfilevar The name of a variable to contain the name of the
2N/A# resulting log file.
2N/A#
2N/A# Globals
2N/A#
2N/A# LOGFILE The name of the log file. Unset before return.
2N/A# ZONEADM_LOGFILE If FINISH_LOG is true, this environment variable
2N/A# is unset before return.
2N/A# LOGGING_COMMAND The command that is being logged. Set by start_log().
2N/A# FINISH_LOG If set to true and ${zone.root}/var/log exists as a
2N/A# directory, $LOGFILE is copied to the same path in the
2N/A# zone. If necessary, /var/log/zones is created in the
2N/A# zone.
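2N/A#
2N/A# Example (illustrative; pairs with the start_log() example above, and
2N/A# "logfile" is an arbitrary variable name):
2N/A#
2N/A#	typeset logfile
2N/A#	finish_log zone logfile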
2N/A#
2N/Afunction finish_log {
2N/A typeset -n zone=$1
2N/A typeset newlog
2N/A typeset logfile=$LOGFILE
2N/A
2N/A [[ -z $LOGFILE ]] && return
2N/A
2N/A vlog "==== Completed: %s ====" "$LOGGING_COMMAND"
2N/A
2N/A # Stop logging to $LOGFILE and restore stderr.
2N/A exec 2<&3
2N/A exec 3<&-
2N/A unset LOGFILE
2N/A [[ $FINISH_LOG == true ]] || return
2N/A
2N/A #
2N/A # If the operation ended such that there is no zone mounted
2N/A # (e.g. uninstall, failed install, etc.) do not attempt to copy
2N/A # it into the zone.
2N/A #
2N/A [[ -z ${zone.root} ]] && return
2N/A [[ -d ${zone.root}/var/log ]] || return
2N/A safe_dir /var
2N/A safe_dir /var/log
2N/A
2N/A if [[ ! -d ${zone.root}/var/log/zones ]]; then
2N/A # If the log file can't be safely copied into the zone,
2N/A # give up on copying it there.
2N/A if [[ -e ${zone.root}/var/log/zones ||
2N/A -h ${zone.root}/var/log/zones ]]; then
2N/A error "$e_baddir" /var/log/zones
2N/A return
2N/A fi
2N/A mkdir -m 755 ${zone.root}/var/log/zones ||
2N/A fatal "$f_mkdir" ${zone.root}/var/log/zones
2N/A fi
2N/A
2N/A safe_copy "$logfile" "${zone.root}${logfile}"
2N/A if [[ -n $2 ]]; then
2N/A typeset -n out=$2
2N/A out=$logfile
2N/A fi
2N/A log "$m_log_copied_to" "${zone.root}$logfile"
2N/A}
2N/A
2N/A#
2N/A# pin_datasets topds
2N/A#
2N/A# Keeps track of which descendants of topds exist at a point in time by
2N/A# tracking the guid property of each dataset. Note that as datasets
2N/A# are renamed, the guid stays the same and as such datasets stay pinned
2N/A# across renames. See also delete_unpinned_datasets() and unpin_datasets().
2N/A#
2N/A# Arguments
2N/A#
2N/A# topds The name of the top dataset to pin. Most likely this is
2N/A# a zonepath dataset.
2N/A#
2N/A# Globals
2N/A#
2N/A# DATASET_PINS An associative array mapping the guid property
2N/A# to the name of pinned dataset. The name is
2N/A# not actually important - it is an arbitrary
2N/A# value assigned to the array element.
2N/A#
2N/A# Return
2N/A# 0 Success - at least one dataset is pinned.
2N/A# 1 Failure
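2N/A#
2N/A# Example (an illustrative sketch of the pin/cleanup/unpin pattern; the
2N/A# install_from_archive call is a hypothetical stand-in for any operation
2N/A# that may create datasets and then fail):
2N/A#
2N/A#	pin_datasets "${zone.path.ds}" || return 1
2N/A#	if ! install_from_archive zone; then
2N/A#		# Remove only the datasets created since pin_datasets().
2N/A#		delete_unpinned_datasets "${zone.path.ds}"
2N/A#	fi
2N/A#	unpin_datasets "${zone.path.ds}"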
2N/A#
2N/Aunset DATASET_PINS
2N/Atypeset -A DATASET_PINS
2N/Afunction pin_datasets {
2N/A typeset topdsn=$1
2N/A typeset guid dsn
2N/A typeset retval=1
2N/A
2N/A vlog "Pinning datasets under %s" "$topdsn"
2N/A
2N/A /usr/sbin/zfs get -Hrp -o value,name guid "$topdsn" 2>/dev/null |
2N/A while IFS=$'\t' read guid dsn; do
2N/A vlog "Pinning %s" "$dsn"
2N/A DATASET_PINS[$guid]="$dsn"
2N/A retval=0
2N/A done
2N/A
2N/A return $retval
2N/A}
2N/A
2N/A#
2N/A# unpin_datasets topds
2N/A#
2N/A# Undoes the work of pin_datasets() for all datasets that are descendants
2N/A# of topds.
2N/A#
2N/A# Arguments
2N/A#
2N/A# topds The name of the top dataset to pin. Most likely this is
2N/A# a zonepath dataset.
2N/A#
2N/A# Globals
2N/A#
2N/A# DATASET_PINS An associative array mapping the guid property
2N/A#			to the name of the pinned dataset.
2N/A#
2N/A# Return
2N/A# 0 Success
2N/A# 1 Failure - nothing was unpinned
2N/A#
2N/Afunction unpin_datasets {
2N/A typeset topdsn=$1
2N/A	typeset guid dsn
2N/A	typeset retval=1
2N/A vlog "Unpinning datasets under %s" "$topdsn"
2N/A
2N/A /usr/sbin/zfs get -Hrp -o value,name guid "$topdsn" 2>/dev/null |
2N/A while IFS=$'\t' read guid dsn; do
2N/A [[ -z ${DATASET_PINS[$guid]} ]] && continue
2N/A vlog "Unpinning %s" "$dsn"
2N/A unset DATASET_PINS[$guid]
2N/A retval=0
2N/A done
2N/A
2N/A return $retval
2N/A}
2N/A
2N/A#
2N/A# delete_unpinned_datasets topds
2N/A#
2N/A# Deletes each dataset under topds that is not pinned by pin_datasets(). As a
2N/A# safety measure, if topds is not pinned, nothing will be deleted and 1 will be
2N/A# returned.
2N/A#
2N/A# Note: Before destroying any datasets, the current working directory is
2N/A# changed to / to avoid EBUSY on dataset unmount.
2N/A#
2N/A# Arguments
2N/A#
2N/A# topds		The name of the top dataset to examine.  Most likely this is
2N/A# a zonepath dataset.
2N/A#
2N/A# Globals
2N/A#
2N/A# DATASET_PINS An associative array mapping the guid property
2N/A#			to the name of the pinned dataset.
2N/A#
2N/A# Return
2N/A# 0 Success
2N/A# 1 Failure or partial failure.
2N/A#
2N/Afunction delete_unpinned_datasets {
2N/A typeset topdsn=$1
2N/A typeset -i ispinned=0
2N/A
2N/A vlog "Destroying datasets under %s that are not pinned" "$topdsn"
2N/A
2N/A # Avoid EBUSY during the umount(2) performed by 'zfs destroy'.
2N/A cd /
2N/A
2N/A typeset name guid
2N/A typeset -A todestroy
2N/A /usr/sbin/zfs get -Hrp -o value,name guid "$topdsn" 2>/dev/null |
2N/A while IFS=$'\t' read guid name; do
2N/A
2N/A # Be sure it is pinned before allowing anything to be deleted.
2N/A if [[ $name == $topdsn ]]; then
2N/A if [[ -z ${DATASET_PINS[$guid]} ]]; then
2N/A error "$e_not_pinned" "$topdsn"
2N/A return 1
2N/A fi
2N/A (( ispinned=1 ))
2N/A fi
2N/A
2N/A # Do not destroy pinned datasets.
2N/A [[ -n ${DATASET_PINS[$guid]} ]] && continue
2N/A
2N/A # To minimize the chance of snapshot collisions during clone
2N/A # promotion, remove all snapshots that we can ASAP.
2N/A if [[ $name == *@* ]]; then
2N/A /usr/sbin/zfs destroy "$name" >/dev/null 2>&1
2N/A if (( $? == 0 )); then
2N/A vlog "Destroyed unpinned snapshot %s" "$name"
2N/A continue
2N/A fi
2N/A else
2N/A # No need to add snapshots to the todestroy list.
2N/A # They will get cleaned up when the clones are
2N/A # destroyed.
2N/A todestroy["$name"]=$guid
2N/A fi
2N/A done
2N/A
2N/A # If no work to be done, return immediately.
2N/A (( ${#todestroy[@]} == 0 )) && return 0
2N/A
2N/A #
2N/A	# If there is anything left to destroy, the top dataset must have
2N/A	# been seen and found to be pinned above.  Getting here without
2N/A	# that means there is a logic error.
2N/A #
2N/A (( ispinned == 0 )) && fail_internal "$e_not_pinned" "$topdsn"
2N/A
2N/A # If the uninstall functions aren't already loaded, load them
2N/A typeset -f destroy_zone_dataset >/dev/null 2>&1 ||
2N/A . /usr/lib/brand/shared/uninstall.ksh
2N/A
2N/A #
2N/A # Destroy the datasets in reverse order. Because of clones that
2N/A # exist within the received datasets, there may be some failures.
2N/A # Don't worry about that so long as each iteration makes progress.
2N/A #
2N/A while (( ${#todestroy[@]} != 0 )); do
2N/A typeset progress=false
2N/A typeset -a names
2N/A typeset -i i
2N/A get_sorted_subscripts todestroy names
2N/A		for (( i = ${#names[@]} - 1; i >= 0; i-- )); do
2N/A name=${names[$i]}
2N/A guid=${todestroy[$name]}
2N/A #
2N/A			# This is icky: destroy_zone_dataset exits the process
2N/A			# if it fails.  If it succeeds, it does not necessarily
2N/A			# return 0, so any return from it must be treated as
2N/A			# success.  To avoid this pitfall it is run in a
2N/A			# subshell, with any existing exit handler disabled
2N/A			# for that subshell.  If destroy_zone_dataset fails,
2N/A			# the return code of the subshell is the exit value
2N/A			# specified within uninstall.ksh.  If it succeeds, the
2N/A			# return code of the subshell is 0.
2N/A #
2N/A (
2N/A trap - EXIT
2N/A destroy_zone_dataset "$name" >/dev/null
2N/A exit 0
2N/A ) || continue
2N/A vlog "Destroyed unpinned dataset %s" "$name"
2N/A progress=true
2N/A unset todestroy[$name]
2N/A unset DATASET_PINS[$guid]
2N/A done
2N/A
2N/A if [[ $progress != true ]]; then
2N/A for name in "${names[@]}"; do
2N/A log "$e_destroy_unpinned" "$name"
2N/A done
2N/A return 1
2N/A fi
2N/A done
2N/A
2N/A return 0
2N/A}
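2N/A
2N/A#
2N/A# Hypothetical sketch of how the three pinning functions combine (the
2N/A# $zpds and $stream variables and the receive step are invented for
2N/A# illustration):
2N/A#
2N/A#	pin_datasets "$zpds" || fail_fatal "$f_pin"
2N/A#	if ! /usr/sbin/zfs receive "$zpds/rpool" < "$stream"; then
2N/A#		# Remove only what the failed receive created; pre-existing
2N/A#		# datasets remain pinned and untouched.
2N/A#		delete_unpinned_datasets "$zpds"
2N/A#	fi
2N/A#	unpin_datasets "$zpds" || error "$f_unpin"
2N/A#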
2N/A
2N/A#
2N/A# Zoneadmd writes a one-line index file into the zone when the zone boots,
2N/A# so any information about installed zones from the original system will
2N/A# be lost at that time. Here we'll warn the sysadmin about any pre-existing
2N/A# zones that they might want to clean up by hand, but we'll leave the zonepaths
2N/A# in place in case they're on shared storage and will be migrated to
2N/A# a new host.
2N/A#
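2N/A# The zone's /etc/zones/index is read below as colon-delimited
2N/A# "name:state:path:uuid" lines; a hypothetical entry such as
2N/A#
2N/A#	web01:installed:/system/zones/web01:0f6e0bb2-29a5-e3c1-a7c4-ef1d5ca2a6f0
2N/A#
2N/A# would be reported as a zone that will be unusable in this image.
2N/A#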
2N/Afunction warn_zones {
2N/A typeset zoneconfig=$ZONEROOT/etc/zones
2N/A
2N/A [[ ! -d $zoneconfig ]] && return
2N/A
2N/A if [[ -h $zoneconfig/index || ! -f $zoneconfig/index ]]; then
2N/A error "$e_badfile" "/etc/zones/index"
2N/A return
2N/A fi
2N/A
2N/A # Read the zone's /etc/zones/index and remember important parts.
2N/A typeset -A zones
2N/A typeset name state path uuid
2N/A	while IFS=: read name state path uuid; do
2N/A		[[ $name == global ]] && continue
2N/A		[[ $state == installed ]] || continue
2N/A		zones[$name]=( path=$path state=$state )
2N/A	done < "$zoneconfig/index"
2N/A
2N/A # Return if there are no installed zones to warn about.
2N/A (( ${#zones[@]} == 0 )) && return
2N/A
2N/A #
2N/A # Special case for having only one zone: Per above, the zone is
2N/A # installed and the zone name is not global. If the zonepath is /,
2N/A # this image is a part of a V2V operation.
2N/A #
2N/A if (( ${#zones[@]} == 1 )); then
2N/A name=${!zones[*]}
2N/A [[ ${zones[$name].path} == / ]] && return
2N/A fi
2N/A
2N/A # Log messages about nested zones and zonepaths.
2N/A log "$v_rmzones" "${!zones[*]}"
2N/A log "$v_rmzonepaths"
2N/A for name in "${!zones[@]}"; do
2N/A log " %s" "${zones[$name].path}"
2N/A done
2N/A}
2N/A
2N/A# Setup i18n output
2N/ATEXTDOMAIN="SUNW_OST_OSCMD"
2N/Aexport TEXTDOMAIN
2N/A
2N/Ae_cannot_wrap=$(gettext "%s: error: wrapper file already exists")
2N/Ae_baddir=$(gettext "Invalid '%s' directory within the zone")
2N/Ae_badfile=$(gettext "Invalid '%s' file within the zone")
2N/Ae_path_abs=$(gettext "Pathname specified to -a '%s' must be absolute.")
2N/Ae_not_found=$(gettext "%s: error: file or directory not found.")
2N/Ae_install_abort=$(gettext "Installation aborted.")
2N/Ae_not_readable=$(gettext "Cannot read directory '%s'")
2N/Ae_not_dir=$(gettext "Error: must be a directory")
2N/Ae_unsupported_archive=$(gettext "Archive format '%s' not supported by this brand. See %s(5) for supported archive types.")
2N/Ae_absolute_archive=$(gettext "Error: archive contains absolute paths instead of relative paths.")
2N/Ae_mismatch_archive=$(gettext "Error: the archive top-level directory (%s) does not match the zonepath (%s).")
2N/Ae_tmpfile=$(gettext "Unable to create temporary file")
2N/Ae_tmpdir=$(gettext "Unable to create temporary directory %s")
2N/Ae_rmdir=$(gettext "Unable to remove directory %s")
2N/Ae_rm=$(gettext "Unable to remove %s")
2N/Ae_mv=$(gettext "Unable to rename '%s' to '%s'")
2N/Ae_root_full=$(gettext "Zonepath root %s exists and contains data; remove or move aside prior to install.")
2N/Ae_temp_mount_failed=$(gettext "ZFS temporary mount of %s on %s failed.")
2N/Ae_no_such_dataset=$(gettext "Error: %s: No such dataset.")
2N/Ae_ds_mnt_multiply_defined=$(gettext "Error: multiple datasets list %s as mountpoint.")
2N/Ae_unmount_failed=$(gettext "unable to unmount %s.")
2N/Ae_mount1_failed=$(gettext "Error: could not mount %s.")
2N/Ae_parent_not_zoned=$(gettext "Error: parent dataset of %s is not zoned.")
2N/Ae_export_migration_failed=$(gettext "Error: migration of /export from active boot environment to the zone's\nrpool/export dataset failed. Manual cleanup required.")
2N/Ae_rpool_migration_failed=$(gettext "Error: migration of data in /rpool from active boot environment to the zone's\nrpool dataset failed. Manual cleanup required.")
2N/Ae_zfs_destroy=$(gettext "Error: cannot destroy dataset %s")
2N/Ae_file_conflict=$(gettext "Received file %s collides in datasets %s.")
2N/Ae_ds_conflict=$(gettext "Received dataset %s collides with existing dataset %s.")
2N/Ae_unexpected_ds=$(gettext "Unexpected dataset %s found in receive stream.")
2N/Ae_ds_copy_failed=$(gettext "Failed to copy datasets from %s to %s")
2N/Ae_be_move_failed=$(gettext "Failed to move BE dataset %s.")
2N/Af_no_ds=$(gettext "The zonepath must be a ZFS dataset.\nThe parent directory of the zonepath must be a ZFS dataset so that the\nzonepath ZFS dataset can be created properly.")
2N/Ae_no_active_be=$(gettext "Error: No active boot environment found.")
2N/Ae_no_mntpt_change_for_mounted=$(gettext "Error: Cannot change mountpoint because %s is mounted")
2N/Ae_zfs_set=$(gettext "Error: Cannot set zfs property %s on %s")
2N/Ae_zfs_inherit=$(gettext "Error: Cannot inherit zfs property %s on %s")
2N/Ae_cmd_failed=$(gettext "Error: Command <%s> exited with status %d")
2N/Ae_not_pinned=$(gettext "Dataset %s is not pinned")
2N/Ae_destroy_unpinned=$(gettext "Unable to destroy unpinned dataset '%s'.")
2N/Ae_dataset_exists=$(gettext "Dataset '%s' already exists")
2N/As10_zbe_not_supported=$(gettext "Solaris 10 style boot environments not supported by this brand.")
2N/Af_mkdir=$(gettext "Unable to create directory %s.")
2N/Af_chmod=$(gettext "Unable to chmod directory %s.")
2N/Af_chown=$(gettext "Unable to chown directory %s.")
2N/Af_rmdir=$(gettext "Unable to remove directory %s.")
2N/Af_hwcap_info=$(gettext "HWCAP: %s\n")
2N/Af_sanity_hwcap=$(gettext \
2N/A"The image was created with an incompatible libc.so.1 hwcap lofs mount.\n"\
2N/A" The zone will not boot on this platform. See the zone's\n"\
2N/A" documentation for the recommended way to create the archive.")
2N/Af_int_bad_opt=$(gettext "Internal error: bad option -%s")
2N/Af_int_missing_opt=$(gettext "Internal error: missing option -%s")
2N/Af_int_missing_arg=$(gettext "Internal error: missing argument %s")
2N/Af_int_bad_opt_combo=$(gettext "Internal error: incompatible options -%s and -%s")
2N/Af_int_bad_arg=$(gettext "Internal error: extra argument %s")
2N/Af_mount=$(gettext "Error: error mounting zone root dataset.")
2N/Af_ds_config=$(gettext "Failed to configure dataset %s: could not set %s.")
2N/Af_backup_dir_exists=$(gettext "Backup directory %s already exists.")
2N/Af_zfs_snapshot=$(gettext "Failed to snapshot source zone.")
2N/Af_zone_clone=$(gettext "Failed to clone zone.")
2N/Af_zfs_create=$(gettext "Failed to create dataset %s.")
2N/Af_zfs_snapshot_of=$(gettext "Failed to create snapshot of %s.")
2N/Af_detach_convert=$(gettext "Conversion of detached datasets failed.")
2N/Af_mount_active_be=$(gettext "Unable to mount zone root dataset.")
2N/Af_unmount_be=$(gettext "Unable to unmount boot environment.")
2N/Af_pin=$(gettext "Failed to mark existing datasets for preservation.")
2N/Af_unpin=$(gettext "Failed to remove preservation mark from pre-existing datasets.")
2N/Af_invalid_data=$(gettext "Invalid data received")
2N/A
2N/Am_interrupt=$(gettext "Cleaning up due to interrupt. Please be patient.")
2N/Am_brnd_usage=$(gettext "brand-specific usage: ")
2N/Am_analyse_archive=$(gettext "Analysing the archive")
2N/Am_fix_ds_mountpoint=$(gettext "Changing mountpoint of dataset %s from %s to %s.")
2N/Am_not_mounting_mountpoint=$(gettext "Not mounting %s because mountpoint is '%s'.")
2N/Am_not_mounting_canmount=$(gettext "Not mounting %s because canmount is '%s'.")
2N/Am_manual_export_migrate=$(gettext "Manual migration of export required. Potential conflicts in\n%s and %s.")
2N/Am_backup_saved=$(gettext "A backup copy of %s is stored at %s.\nIt can be deleted after verifying it was migrated correctly.")
2N/Am_migrating_data=$(gettext "Migrating data\n\tfrom: %s\n\t to: %s")
2N/Am_convert_detached=$(gettext "Converting detached zone boot environment '%s'.")
2N/Am_log_progress_to=$(gettext "Progress being logged to %s")
2N/Am_log_copied_to=$(gettext "Log saved in non-global zone as %s")
2N/Am_manual_snapshot_cleanup=$(gettext "Manual cleanup of snapshot %s required")
2N/A
2N/Anot_readable=$(gettext "Cannot read file '%s'")
2N/Anot_flar=$(gettext "Input is not a flash archive")
2N/Abad_flar=$(gettext "Flash archive is corrupt")
2N/Abad_zfs_flar=$(gettext "Flash archive contains a ZFS send stream.\n\tRecreate the flar using the -L option with cpio or pax.")
2N/Af_unpack_failed=$(gettext "Unpacking the archive failed")
2N/Aunknown_archiver=$(gettext "Archiver %s is not supported")
2N/Acmd_not_exec=$(gettext "Required command '%s' not executable!")
2N/Ainstalling=$(gettext " Installing: This may take several minutes...")
2N/Ano_installing=$(gettext " Installing: Using existing zone boot environment")
2N/Afrom_clone=$(gettext " Installing: Using clone of zone boot environment '%s'")
2N/Av_rmzones=$(gettext "The following zones in this image will be unusable: %s")
2N/Av_rmzonepaths=$(gettext "These zonepaths will be extracted but will not be used:")
2N/A
2N/A
2N/A#
2N/A# Exit values used by the script, as #defined in <sys/zone.h>
2N/A#
2N/AZONE_SUBPROC_OK=0
2N/AZONE_SUBPROC_UNAVAILABLE=252
2N/AZONE_SUBPROC_USAGE=253
2N/AZONE_SUBPROC_TRYAGAIN=254
2N/AZONE_SUBPROC_FATAL=255
2N/A
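2N/A#
2N/A# Hypothetical sketch only: a brand hook sourcing this file would normally
2N/A# exit with one of the values above (the fetch_archive helper and the
2N/A# retry condition are invented):
2N/A#
2N/A#	if ! fetch_archive "$src"; then
2N/A#		exit $ZONE_SUBPROC_TRYAGAIN	# transient failure; retry later
2N/A#	fi
2N/A#	exit $ZONE_SUBPROC_OK
2N/A#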