#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Shared brand helper functions (log, etc.)
. /usr/lib/brand/shared/common.ksh

#
# get_zone_state zone
#
# Sets zone.state and zone.brand from `zoneadm list -p` output.
#
function get_zone_state {
	typeset -n z=$1
	typeset fields
	typeset IFS=:
	set -A fields -- $(zoneadm -z "${z.name}" list -p)
	if (( ${#fields[@]} < 6 )); then
		log "%s: Unable to parse zone state" "${z.name}"
		return 1
	fi
	z.state=${fields[2]}
	z.brand=${fields[5]}
	return 0
}
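
#
# `zoneadm list -p` emits colon-delimited fields; an illustrative line:
#
#	1:web1:running:/zones/web1:8a3c...:solaris:excl
#
# get_zone_state above takes the state from field 2 and the brand from
# field 5 (0-indexed).
#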
#
# run cmd undo rollback
#
# Runs the command given in the cmd array. If all goes well, it adds the
# command in the undo array to the rollback array (of arrays) and returns 0.
# If the command does not return 0, the commands that exist in the rollback
# array are executed in the reverse of the order that they were added.
#
# In all cases, the exit value from the command in cmd is returned.
#
function run {
	typeset -n cmd=$1
	typeset -n undo=$2
	typeset -n rollback=$3

	log "Running command <%s>" "${cmd[*]}"
	if (( dryrun )); then
		return 0
	fi

	( eval "${cmd[@]}" )
	typeset -i rv=$?
	if (( rv == 0 )); then
		# Push the undo array onto the rollback stack
		typeset -i rbsize=${#rollback[@]}
		rollback[rbsize]=("${undo[@]}")
		return 0
	fi

	typeset -i i
	for (( i = ${#rollback[@]} - 1; i >= 0; i-- )); do
		log "Executing rollback command <%s>" "${rollback[i][*]}"
		( eval "${rollback[i][@]}" )
	done
	return $rv
}
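
#
# Typical use of run (values are illustrative):
#
#	set -A cmd zfs create rpool/test
#	set -A undo zfs destroy rpool/test
#	run cmd undo rollback || exit 1
#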
#
# get_active_be zone
#
# Overrides the shared get_active_be. It is used instead of the
# brand-specific get_active_be because the brand-specific versions know
# about the new layout, not the old.
#
function get_active_be {
	typeset -n zone=$1
	typeset -A uuid2gzbe
	typeset -i needs_selection=0

	#
	# solaris10 only supports a single BE, so use the lone dataset
	# under ROOT for those that need to be converted.
	#
	if [[ ${zone.brand} == solaris10 ]]; then
		typeset -a bes
		set -A bes -- $(zfs list -H -o name -d 1 -r \
		    "${zone.path.ds}/ROOT")
		zone.active_ds=${bes[1]}
		return 0
	fi
	#
	# The rest applies to the solaris brand or something that uses the
	# solaris brand layout.
	#

	#
	# Load an associative array of global zone BEs. Store the current
	# uuid of the GZBE in $active_gzbe.
	#
	# Initially the uuid2gzbe associative array is one dimensional. The
	# following loop (beginning with zfs list) will add a second
	# dimension. The fully populated uuid2gzbe structure will look like:
	#
	#	uuid2gzbe[<gzbe uuid>]=<gzbe name>
	#	uuid2gzbe[<gzbe uuid>][<ngzbe name>]=<ngz active>
	#
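	#
	# For example, with an illustrative BE name and truncated uuid:
	#
	#	uuid2gzbe[8a3c...]=solaris-1
	#	uuid2gzbe[8a3c...][zbe-2]=on
	#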
	typeset active_gzbe
	typeset name uuid active junk
	beadm list -H | while IFS=';' read -r name uuid active junk; do
		uuid2gzbe[$uuid]=$name
		[[ $active == *N* ]] && active_gzbe=$uuid
	done
	if [[ -z $active_gzbe ]]; then
		log "%s: Unable to determine the active global zone BE" \
		    "${zone.name}"
		return 1
	fi
	#
	# Look for the best match of zone BEs: one whose parent BE property
	# matches the active global zone BE and that is marked active.
	#
	typeset prop_parent="org.opensolaris.libbe:parentbe"
	typeset prop_active="org.opensolaris.libbe:active"
	typeset -A ngzbe
	typeset ds parent
	zfs list -H -r -d 1 -t filesystem -o name,$prop_parent,$prop_active \
	    "${zone.path.ds}/ROOT" | while read -r ds parent active; do
		if [[ $ds == "${zone.path.ds}/ROOT" ]]; then
			# skip the non-BE top-level dataset
			continue
		fi
		ngzbe[${ds##*/}]=(parent=$parent active=$active)
		if [[ -z ${uuid2gzbe[$parent]} ]]; then
			# BE is not associated with a known GZBE
			(( needs_selection = 1 ))
			continue
		fi
		uuid2gzbe[$parent][${ds##*/}]=$active
		if [[ $parent == "$active_gzbe" && $active == on ]]; then
			zone.active_ds=$ds
			active_ds=$ds
		fi
	done
	if [[ -n $active_ds ]]; then
		return 0
	fi
	#
	# Active dataset was not found by property. See if something is
	# mounted on the zone root.
	#
	typeset mntds=$(zfs list -H -o name "${zone.root}" 2>/dev/null)
	if [[ $mntds == ${zone.path.ds}/ROOT/* ]]; then
		zone.active_ds=$mntds
		return 0
	fi
	#
	# If no active boot environment was found, offer a table of
	# available boot environments.
	#
	print "\nThe following unconverted boot environments exist in this zone.\n"
	print -- "Boot Environment NGZ Active Global Zone Boot Environment" \
	    "GZ Active"
	print -- "----------------- ----------- ----------------------------" \
	    "---------"
	typeset bename gzactive
	for bename in "${!ngzbe[@]}"; do
		if [[ ${ngzbe[$bename].parent} == "$active_gzbe" ]]; then
			gzactive=yes
		else
			gzactive=no
		fi
		printf "%-17s %-11s %-28s %-9s\n" "$bename" \
		    "${ngzbe[$bename].active}" \
		    "${uuid2gzbe[${ngzbe[$bename].parent}]}" "$gzactive"
	done
	print "\nUse:\n"
	print "\tdsconvert -b <bootenv> ...\n"
	print "to specify the boot environment to activate during conversion.\n"
	return 1
}
function usage {
	print "Usage: dsconvert [-hnv] [-b <BE>] zone\n\n" \
	    " -h\t\tShow this help message\n" \
	    " -n\t\tDry run - make no changes\n" \
	    " -v\t\tVerbose\n" \
	    " -b <BE>\tActivate the specified zone boot environment\n"
}
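
#
# Example (illustrative): preview the conversion of zone web1 without
# making changes, then perform it:
#
#	dsconvert -n -v web1
#	dsconvert web1
#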
#
# main
#
typeset -i dryrun=0
typeset -i verbose=0
typeset bootenv=
typeset opt
m_conversion_complete="%s: Conversion complete"
while getopts :b:hnv opt; do
	case $opt in
	b)	bootenv=$OPTARG
		;;
	h)	usage
		exit 0
		;;
	n)	dryrun=1
		m_conversion_complete="%s: Dryrun complete"
		;;
	v)	verbose=1
		;;
	?)	usage 1>&2
		exit 1
		;;
	esac
done
shift $((OPTIND - 1))
if (( $# != 1 )); then
	usage 1>&2
	exit 1
fi
zonename=$1
typeset zone=( name="$zonename" )
zone.path=$(zonecfg -z "$zonename" info zonepath 2>/dev/null | \
    awk '{print $2}')
[[ -n ${zone.path} ]] || {
	log "%s: No such configured zone" "$zonename"
	exit 1
}
typeset -i has_bes=0
typeset active_ds=
zone.path.ds=$(zfs list -H -o name "${zone.path}" 2>/dev/null)
[[ -n ${zone.path.ds} ]] || {
	log "%s: zonepath must be a ZFS dataset" "$zonename"
	exit 1
}
zone.root="${zone.path}/root"
zone.rpool_ds="${zone.path.ds}/rpool"
get_zone_state zone || exit 1

#
# This check is used for the brief window where people may be trying to use
# an ON build with a build of the solaris brand that is too old. Since the
# sysboot script was introduced with the dataset layout change, it makes for
# an easy check.
#
if [[ ! -f /usr/lib/brand/solaris/sysboot ]]
then
	log "%s: The installed solaris brand is too old to convert" \
	    "$zonename"
	exit 1
fi

case "${zone.state}" in
installed) : ;;
unavailable) : ;;
configured)
	# Be sure that there's a dataset under the old ROOT
	typeset be_datasets
	set -A be_datasets $(zfs list -H -o name -d 1 -r \
	    "${zone.path.ds}/ROOT" 2>/dev/null)
	if (( ${#be_datasets[@]} < 2 )); then
		log "%s: No boot environments found to convert" "$zonename"
		exit 1
	else
		has_bes=1
	fi
	;;
*)
	log "%s: Zones in state %s cannot be converted" "$zonename" \
	    "${zone.state}"
	exit 1
	;;
esac
if [[ -n $bootenv ]]; then
	if [[ ${zone.brand} == solaris10 ]]; then
		log "%s: Option -b is not supported with brand %s" \
		    "$zonename" "${zone.brand}"
		exit 1
	fi
	zone.active_ds="${zone.path.ds}/ROOT/$bootenv"
	if ! zfs list -H -o name "${zone.active_ds}" >/dev/null 2>&1; then
		log "%s: Boot environment %s does not exist" \
		    "$zonename" "$bootenv"
		exit 1
	fi
else
	get_active_be zone || exit 1
fi
#
# Look at zonecfg fs and dataset resources to see if anything under /export
# is added. The message that comes out of zonecfg_has_export is not helpful
# in this particular case, so we ignore it.
#
typeset -i skip_export=0
zonecfg_has_export "$zonename" >/dev/null 2>&1 && skip_export=1

typeset -a cmd undo rollback
set -A cmd zfs create -o mountpoint=/rpool -o zoned=on "${zone.rpool_ds}"
set -A undo zfs destroy "${zone.rpool_ds}"
run cmd undo rollback || exit 1
if (( skip_export == 0 )); then
	set -A cmd zfs create -o mountpoint=/export "${zone.rpool_ds}/export"
	set -A undo zfs destroy "${zone.rpool_ds}/export"
	run cmd undo rollback || exit 1
fi
set -A props -- -o mountpoint=legacy -o canmount=noauto
if [[ ${zone.brand} != solaris10 ]]; then
	a_push props -o "com.oracle.libbe:nbe_handle=on"
fi
set -A cmd zfs create "${props[@]}" "${zone.rpool_ds}/ROOT"
set -A undo zfs destroy "${zone.rpool_ds}/ROOT"
run cmd undo rollback || exit 1
#
# Moving the dataset hierarchy is problematic when clones are involved.
# The easiest way to avoid the problem with clones is to use more of them.
#
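
#
# For example (names illustrative), rather than renaming
# rpool/z1/ROOT/zbe to rpool/z1/rpool/ROOT/zbe, the BE is snapshotted
# and "zfs clone" creates the copy in its new location, leaving any
# existing clone relationships intact.
#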
set -A cmd zfs set -r canmount=off "${zone.path.ds}/ROOT"
set -A undo zfs inherit -r canmount \
    "${zone.path.ds}/ROOT"
run cmd undo rollback || exit 1
#
# Unconventional quoting of the date format string is to silence SCCS
# keyword warnings.
#
typeset snapname=dsconvert-$(date '+%Y''%m''%d''%H''%M''%S')
set -A cmd zfs snapshot -r "${zone.path.ds}/ROOT@$snapname"
set -A undo zfs destroy -r "${zone.path.ds}/ROOT@$snapname"
run cmd undo rollback || exit 1
#
# Get a list of all the old datasets and their properties. Once we
# have that, use that information to create clones.
#
typeset -a dslist
get_datasets -p "${zone.path.ds}/ROOT" dslist || {
	log "%s: Unable to get a list of datasets under %s" \
	    "${zone.name}" "${zone.path.ds}/ROOT"
	exit 1
}
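
#
# Each dslist element is expected to be a compound value; an
# illustrative shape, with a hypothetical dataset name:
#
#	dslist[1].name=rpool/z1/ROOT/zbe
#	dslist[1].props[mountpoint].value=legacy
#	dslist[1].props[mountpoint].source=local
#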
#
# Go through the list and be sure they are all unmounted and won't become
# mounted. This is needed because the clones that get created will need to use
# the mount points currently used by the originals. Ignore the first item in
# the array because that is the ROOT dataset.
#
typeset -i i
for (( i = ${#dslist[@]} - 1; i > 0; i-- )); do
	# Before this conversion multiple datasets in the BE were broken.
	# Therefore, we don't deal with them here either.
	if [[ ${dslist[i].name} == ${zone.path.ds}/ROOT/*/* ]]; then
		# Force a rollback and exit
		log "%s: Unsupported non-root dataset %s in boot environment" \
		    "${zone.name}" "${dslist[i].name}"
		set -A cmd false
		set -A undo echo "this command will never run"
		run cmd undo rollback
		exit 1
	fi
"${dslist[i].name}"
fi
if [[ ${dslist[i].props[mountpoint].value} == legacy ]]; then
# Create undo using existing mountpoint
else
# This conversion process assumes no one uses temporary
# mount points for zones before this conversion is
# necessary.
fi
fi
done
#
# The sources are all unmounted and along the way we verified that there
# are only root datasets. Create clones.
#
for (( i = 1; i < ${#dslist[@]}; i++ )); do
	set -A props -- -o mountpoint=/ -o canmount=noauto
	# preserve user properties
	typeset prop
	for prop in "${!dslist[i].props[@]}"; do
		# Skip non-user properties and inherited user properties
		[[ $prop == *:* ]] || continue
		[[ ${dslist[i].props[$prop].source} == local ]] || \
		    continue
		a_push props -o "$prop=${dslist[i].props[$prop].value}"
	done
	new=${dslist[i].name/${zone.path.ds}/${zone.rpool_ds}}
	set -A cmd zfs clone "${props[@]}" \
	    "${dslist[i].name}@$snapname" "$new"
	set -A undo destroy_zone_dataset "$new"
	run cmd undo rollback || exit 1
done
#
# The layout is now correct, except for datasets that need to be
# deleted. Load the brand-specific common functions. Note that
# all of the non-solaris10 brands that this script supports use
# the solaris common.ksh.
#
if [[ ${zone.brand} == solaris10 ]]; then
	. /usr/lib/brand/solaris10/common.ksh
else
	. /usr/lib/brand/solaris/common.ksh
fi
#
# Clean up boot environment properties as needed.
#
# If we started this exercise with a zone that was attached, just run
# the rough equivalent of the sysboot hook to mount it. If it wasn't
# attached, then we need to remount the active dataset at the zone
# root.
#
if [[ ${zone.state} == installed ]]; then
	set -A cmd mount_active_be -c zone
	set -A undo :
	run cmd undo rollback || exit 1
	if [[ ${zone.brand} != solaris10 ]]; then
		set -A cmd zfs set org.opensolaris.libbe:active=on \
		    "${zone.active_ds}"
		set -A undo :
		run cmd undo rollback || exit 1
	fi
elif [[ ${zone.state} == unavailable ]]; then
	set -A cmd mount_active_be -c zone
	set -A undo :
	run cmd undo rollback || exit 1
	if [[ ${zone.brand} == solaris10 ]]; then
		set -A cmd zfs set org.opensolaris.libbe:active=on \
		    "${zone.active_ds}"
	else
		set -A cmd zfs set com.oracle.libbe:nbe_handle=on \
		    "${zone.active_ds}"
	fi
	set -A undo :
	run cmd undo rollback || exit 1
else # detached
	#
	# Before the dataset revision, each BE only supported a single
	# (root) dataset. As such, we can use a rather simplistic
	# approach to mounting the active BE.
	#
	set -A cmd zfs set canmount=off "${zone.active_ds}"
	set -A undo zfs inherit canmount "${zone.active_ds}"
	run cmd undo rollback || exit 1
	set -A cmd zfs set zoned=off "${zone.active_ds}"
	set -A undo zfs inherit zoned "${zone.active_ds}"
	run cmd undo rollback || exit 1
	set -A cmd zfs set mountpoint="${zone.root}" "${zone.active_ds}"
	set -A undo zfs inherit mountpoint \
	    "${zone.active_ds}"
	run cmd undo rollback || exit 1
	set -A cmd zfs set canmount=on "${zone.active_ds}"
	set -A undo zfs set canmount=off "${zone.active_ds}"
	run cmd undo rollback || exit 1
	set -A cmd zfs mount "${zone.active_ds}"
	set -A undo zfs unmount "${zone.active_ds}"
	run cmd undo rollback || exit 1
	if [[ ${zone.brand} == solaris10 ]]; then
		set -A cmd zfs set org.opensolaris.libbe:active=on \
		    "${zone.active_ds}"
	else
		set -A cmd zfs set com.oracle.libbe:nbe_handle=on \
		    "${zone.active_ds}"
	fi
	set -A undo :
	run cmd undo rollback || exit 1
fi
#
# Clean up old datasets. Each new dataset is a clone of an old one, so
# promote the new datasets before destroying the originals.
#
for (( i = ${#dslist[@]} - 1; i >= 0; i-- )); do
	if (( i > 0 )); then
		set -A cmd zfs promote \
		    "${dslist[i].name/${zone.path.ds}/${zone.rpool_ds}}"
		set -A undo echo "cannot undo promotion"
		run cmd undo rollback || exit 1
	fi
	set -A cmd zfs destroy -r \
	    "${dslist[i].name}"
	set -A undo echo "cannot undo destruction"
	run cmd undo rollback || exit 1
done
if (( ${#dslist[@]} > 0 )); then
	log "%s: Removed old dataset layout" "${zone.name}"
fi
#
# Convert for dataset aliases
#
# If there are no delegated datasets, this does nothing. If there are
# delegated datasets that don't have aliases defined (none should have
# aliases yet), the default aliases get populated into the zone configuration.
#
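
#
# For example (illustrative), a delegated dataset added with:
#
#	zonecfg -z <zone> "add dataset; set name=tank/dbdata; end"
#
# is aliased, by default, to the last component of its name and appears
# inside the zone as a virtual pool named "dbdata".
#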
typeset -i errors=0
log "%s: Converting zone configuration for dataset aliases\n" "$zonename"
zonecfg -z "$zonename" verify
if (( $? != 0 )); then
	(( errors++ ))
	cat <<-NOMORE
	The automated conversion process is complete.
	Before the zone will boot, zone configuration problems need to be
	corrected. Run 'zonecfg -z $zonename', then use the verify subcommand
	within zonecfg to get a list of configuration problems that must be
	corrected within zonecfg.
	NOMORE
fi
log "%s: Verifying zone with zoneadm\n" "$zonename"
zoneadm -z "$zonename" verify
if (( $? != 0 )); then
	if (( errors == 0 )); then
		print "\nThe automated conversion process is complete."
	fi
	(( errors++ ))
	cat <<-NOMORE
	As displayed above, system configuration problems exist that must
	be corrected before the zone will boot. Before booting $zonename you
	will need to manually correct those problems.
	You may run 'zoneadm -z $zonename verify' at any time to re-verify the
	system configuration related to this zone.
	NOMORE
fi
#
# Check for potential /dev/zvol conflicts. While this may emit warnings,
# it should not cause the exit code to change from 0.
#
log "%s: Checking for potential configuration conflicts\n" "$zonename"
zonecfg -z "$zonename" verify -v
if (( $? != 0 )); then
	cat <<-NOMORE
	With Zone Dataset Aliasing, ZFS datasets that are delegated to
	$zonename appear within the zone as virtual ZFS pools. One or more
	of the device resources that appear in the configuration for
	$zonename may hide the existence of similarly named ZFS volumes
	within these virtual pools. See zonecfg(1M) and dev(7FS) for
	details.
	You may run 'zonecfg -z $zonename verify -v' at any time to perform
	this check again.
	NOMORE
fi
if (( errors != 0 )); then
	exit 1
fi
log "$m_conversion_complete" "$zonename"
exit 0