# nightly.sh revision e599109e84b712d1412651bae74b1b9a7967d446
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
#
# Based on the nightly script from the integration folks,
# Mostly modified and owned by mike_s.
# Changes also by kjc, dmk.
#
# BRINGOVER_WS may be specified in the env file.
# The default is the old behavior of CLONE_WS
#
# -i on the command line, means fast options, so when it's on the
# command line (only), lint and check builds are skipped no matter what
# the setting of their individual flags are in NIGHTLY_OPTIONS.
#
# LINTDIRS can be set in the env file, format is a list of:
#
# /dirname-to-run-lint-on flag
#
# Where flag is: y - enable lint noise diff output
# n - disable lint noise diff output
#
#
# OPTHOME and TEAMWARE may be set in the environment to override /opt
#
#
# The CDPATH variable causes ksh's `cd' builtin to emit messages to stdout
# under certain circumstances, which can really screw things up; unset it.
#
unset CDPATH
# Get the absolute path of the nightly script that the user invoked. This
# may be a relative path, and we need to do this before changing directory.
nightly_path=`whence $0`
#
# Keep track of where we found nightly so we can invoke the matching
# which_scm script. If that doesn't work, don't go guessing, just rely
# or the user's workspace.
#
if [[ ! -x $WHICH_SCM ]]; then
fi
#
# Datestamp for crypto tarballs. We don't use BUILD_DATE because it
# doesn't sort right and it uses English abbreviations for the month.
# We want to guarantee a consistent string, so just invoke date(1)
# once and save the result in a global variable. YYYY-MM-DD is easier
# to parse visually than YYYYMMDD.
#
cryptostamp=$(date +%Y-%m-%d)
#
# Echo the path for depositing a crypto tarball, creating the target
# directory if it doesn't already exist.
# usage: cryptodest suffix
# where "suffix" is "" or "-nd".
#
function cryptodest {
typeset suffix=$1
#
# $PKGARCHIVE gets wiped out with each build, so put the
# tarball one level up.
#
#
# Put the suffix after the datestamp to make it easier for
# gatelings to use crypto from a specific date (no need to
# copy and rename the gate tarball).
#
echo "$dir/on-crypto-$cryptostamp$suffix.$MACH.tar"
}
#
# Create a non-stamped symlink to the given crypto tarball.
# Return 0 on success, non-zero on failure.
#
function cryptolink {
typeset targpath=$1
typeset suffix=$2
echo "no crypto at $targpath"
return 1
fi
return $?
}
#
# Generate a crypto tarball from the proto area and put it in the
# canonical location, along with the datestamp-free symlink.
# Sets build_ok to "n" if there is a problem.
#
function crypto_from_proto {
typeset label=$1
typeset suffix=$2
typeset -i stat
typeset to
#
# Generate the crypto THIRDPARTYLICENSE file. This needs to
# be done after the build has finished and before we run
# cryptodrop. We'll generate the file twice if we're building
# both DEBUG and non-DEBUG, but it's a cheap operation and not
# worth the complexity to only do once.
#
if (( $? != 0 )) ; then
echo "Couldn't create crypto THIRDPARTYLICENSE file." |
build_ok=n
return
fi
else
fi
if (( $? != 0 )) ; then
build_ok=n
else
if (( $? != 0 )) ; then
build_ok=n
fi
fi
}
#
# Function to do a DEBUG and non-DEBUG build. Needed because we might
# need to do another for the source build, and since we only deliver DEBUG or
# non-DEBUG packages.
#
# usage: normal_build
#
function normal_build {
typeset orig_p_FLAG="$p_FLAG"
typeset orig_a_FLAG="$a_FLAG"
typeset crypto_in="$ON_CRYPTO_BINS"
typeset crypto_signer="$CODESIGN_USER"
suffix=""
# non-DEBUG build begins
fi
fi
else
fi
# non-DEBUG build ends
# DEBUG build begins
build "DEBUG" "$suffix" "" "$MULTI_PROTO" "$crypto_in"
fi
fi
else
fi
# DEBUG build ends
}
#
# usage: run_hook HOOKNAME ARGS...
#
# If variable "$HOOKNAME" is defined, insert a section header into
# our logs and then run the command with ARGS
#
function run_hook {
HOOKNAME=$1
shift
(
# Let exit status propagate up
fi
) | tee -a $mail_msg_file >> $LOGFILE
build_ok=n
tee -a $mail_msg_file >> $LOGFILE
exit 1
fi
fi
}
#
# usage: filelist DESTDIR PATTERN
#
function filelist {
DEST=$1
PATTERN=$2
cd ${DEST}
if [ ! -f ${OBJFILES} ]; then
return;
fi
do
# wildcard expansion
for j in $i
do
if [ -f "$j" ]; then
echo $j
fi
if [ -d "$j" ]; then
echo $j
fi
done
}
# function to save off binaries after a full build for later
# restoration
function save_binaries {
# save off list of binaries
tee -a $mail_msg_file >> $LOGFILE
rm -f ${BINARCHIVE}
cd ${CODEMGR_WS}
> ${BINARCHIVE}
}
# delete files
# usage: hybridize_files DESTDIR MAKE_TARGET
function hybridize_files {
DEST=$1
MAKETARG=$2
tee -a $mail_msg_file >> $LOGFILE
do
done
do
rm -f ${i}+
< ${i} > ${i}+
mv ${i}+ ${i}
done
}
# restore binaries into the proper source tree.
# usage: restore_binaries DESTDIR MAKE_TARGET
function restore_binaries {
DEST=$1
MAKETARG=$2
tee -a $mail_msg_file >> $LOGFILE
cd ${DEST}
zcat ${BINARCHIVE} | \
}
# rename files we save binaries of
# usage: rename_files DESTDIR MAKE_TARGET
function rename_files {
DEST=$1
MAKETARG=$2
tee -a $mail_msg_file >> $LOGFILE
do
echo ${i} | tee -a $mail_msg_file >> ${LOGFILE}
rm -f ${i}.export
mv ${i} ${i}.export
done
}
#
# Copy some or all of the source tree.
#
# Returns 0 for success, non-zero for failure.
#
# usage: copy_source CODEMGR_WS DESTDIR LABEL SRCROOT
#
function copy_source {
WS=$1
DEST=$2
label=$3
srcroot=$4
if (( $? != 0 )) ; then
tee -a $mail_msg_file >> $LOGFILE
build_ok=n
return 1
fi
cd "$WS"
if (( $? != 0 )) ; then
tee -a $mail_msg_file >> $LOGFILE
build_ok=n
return 1
fi
;;
if (( $? != 0 )) ; then
build_ok=n
return 1
fi
;;
*)
build_ok=n
echo "Tree copy is not supported for workspace type" \
return 1
;;
esac
return 0
}
#
# Mercurial-specific copy code for copy_source(). Handles the
# combined open and closed trees.
#
# Returns 0 for success, non-zero for failure.
#
# usage: copy_source_mercurial destdir srcroot
#
function copy_source_mercurial {
typeset dest=$1
typeset srcroot=$2
typeset open_top closed_top
usr)
fi
;;
return 1
fi
;;
*)
;;
esac
if (( $? != 0 )) ; then
tee -a $mail_msg_file >> $LOGFILE
return 1
fi
fi
if (( $? != 0 )) ; then
return 1
fi
else
if (( $? != 0 )) ; then
tee -a $mail_msg_file >> $LOGFILE
return 1
fi
fi
fi
return 0
}
#
# usage: set_up_source_build CODEMGR_WS DESTDIR MAKE_TARGET
# Sets SRC to the modified source tree, for use by the caller when it
# builds the tree.
#
function set_up_source_build {
WS=$1
DEST=$2
MAKETARG=$3
if (( $? != 0 )); then
echo "\nCould not copy source tree for source build." |
tee -a $mail_msg_file >> $LOGFILE
build_ok=n
return
fi
cd $SRC
cd ${DEST}
if [ "${MAKETARG}" = "CRYPT_SRC" ]; then
rm -f ${CODEMGR_WS}/crypt_files.cpio.Z
tee -a $mail_msg_file >> $LOGFILE
for i in `cat ${CRYPT_FILES}`
do
# make sure the files exist
if [ -f "$i" ]; then
continue
fi
if [ -d "$i" ]; then
continue
fi
done
fi
if [ "${MAKETARG}" = "EXPORT_SRC" ]; then
# rename first, since we might restore a file
# of the same name (mapfiles)
fi
fi
# save the cleartext
tee -a $mail_msg_file >> $LOGFILE
cd ${DEST}
compress > ${CODEMGR_WS}/${MAKETARG}.cpio.Z
if [ "${MAKETARG}" = "EXPORT_SRC" ]; then
fi
if [ "${MAKETARG}" = "CRYPT_SRC" ]; then
fi
}
# Return library search directive as function of given root.
function myldlibs {
}
# Return header search directive as function of given root.
function myheaders {
}
#
# Wrapper over commands that generate BFU archives. The entire
# command output gets written to LOGFILE, and any unexpected messages
# are written to the mail message. Returns with the status of the
# original command.
#
function makebfu_filt {
typeset tmplog
typeset errors
typeset cmd
integer cmd_stat
cmd="$1"
shift
cmd_stat=$?
>> "$mail_msg_file"
fi
return $cmd_stat
}
#
# Unpack the crypto tarball into the proto area. We first extract the
# tarball into a temp directory so that we can handle the non-DEBUG
# tarball correctly with MULTI_PROTO=no.
# Return 0 on success, non-zero on failure.
#
function unpack_crypto {
typeset tarfile=$1
typeset suffix=$2
typeset ctop=$(mktemp -d /tmp/crypto.XXXXXX)
echo "Unpacking crypto ($tarfile)..."
return 1
fi
#
# We extract with -p so that we maintain permissions on directories.
#
typeset -i stat=$?
return $stat
}
#
# Function to do the build, including cpio archive and package generation.
# usage: build LABEL SUFFIX ND MULTIPROTO CRYPTO
# - LABEL is used to tag build output.
# - SUFFIX is used to distinguish files (e.g., DEBUG vs non-DEBUG,
# open-only vs full tree).
# - ND is "-nd" (non-DEBUG builds) or "" (DEBUG builds).
# - If MULTIPROTO is "yes", it means to name the proto area according to
# SUFFIX. Otherwise ("no"), (re)use the standard proto area.
# - CRYPTO is the path to the crypto tarball, or null.
#
function build {
LABEL=$1
SUFFIX=$2
ND=$3
MULTIPROTO=$4
CRYPTOPATH=$5
CPIODIR=${CPIODIR_ORIG}${SUFFIX}
[ $MULTIPROTO = no ] || export ROOT=$ROOT$SUFFIX
export CLOSEDROOT=${ROOT}-closed
fi
export ENVLDLIBS1=`myldlibs $ROOT`
export ENVCPPFLAGS1=`myheaders $ROOT`
#
# Build OS-Networking source
#
>> $LOGFILE
cd $SRC
fi
if [ "$?" = "0" ]; then
build_ok=n
fi
if [ "$?" = "0" ]; then
build_ok=n
fi
if (( $? != 0 )) ; then
build_ok=n
fi
fi
fi
>> $LOGFILE
tail -3 $SRC/${INSTALLOG}.out >>$mail_msg_file
fi
fi
fi
#
# Re-sign selected binaries using signing server
# (gatekeeper builds only)
#
signing_file="${TMPDIR}/signing"
rm -f ${signing_file}
export CODESIGN_USER
tee -a ${signing_file} >> $LOGFILE
>> $LOGFILE
if (( $? == 0 )) ; then
build_ok=n
fi
fi
#
# Create cpio archives for preintegration testing (PIT)
#
>> $LOGFILE
# hack for test folks
else
X=${CPIODIR}
fi
else
>> $LOGFILE
fi
#
# Building Packages
#
>> $LOGFILE
continue
fi
cd $SRC/$d
done
continue
fi
done
else
#
# Handle it gracefully if -p was set but there are
# neither pkg nor pkgdefs directories.
#
>> $LOGFILE
fi
else
fi
}
# Usage: dolint /dir y|n
function dolint {
if [ ! -d "$1" ]; then
echo "dolint error: $1 is not a directory"
exit 1
fi
if [ "$2" != "y" -a "$2" != "n" ]; then
echo "dolint internal error: $2 should be 'y' or 'n'"
exit 1
fi
lintdir=$1
dodiff=$2
export ENVLDLIBS1=`myldlibs $ROOT`
export ENVCPPFLAGS1=`myheaders $ROOT`
#
# '$MAKE lint' in $lintdir
#
# remove old lint.out
if [ -f $lintdir/lint-noise.ref ]; then
fi
cd $lintdir
#
# Remove all .ln files to ensure a full reference file
#
rm -f Nothing_to_remove \
tail -3 $LINTOUT >>$mail_msg_file
fi
fi
# should be none, though there are a few that were filtered out
# above
| sort | uniq >> $mail_msg_file
fi
}
# Install proto area from IHV build
function copy_ihv_proto {
echo "\n==== Installing IHV proto area ====\n" \
>> $LOGFILE
fi
else
fi
fi
# If there's a non-DEBUG version of the IHV proto area,
# copy it, but copy something if there's not.
cd $IA32_IHV_ROOT-nd
else
return
fi
fi
}
# Install IHV packages in PKGARCHIVE
# usage: copy_ihv_pkgs LABEL SUFFIX
function copy_ihv_pkgs {
LABEL=$1
SUFFIX=$2
# always use non-DEBUG IHV packages
>> $LOGFILE
else
fi
>> $LOGFILE
else
fi
}
#
# Build and install the onbld tools.
#
# usage: build_tools DESTROOT
#
# returns non-zero status if the build was successful.
#
function build_tools {
DESTROOT=$1
>> $LOGFILE
cd ${TOOLS}
egrep -v warning >> $mail_msg_file
return $?
}
#
# Set up to use locally installed tools.
#
# usage: use_tools TOOLSROOT
#
function use_tools {
TOOLSROOT=$1
#
# If we're not building ON workspace, then the TOOLSROOT
# settings here are clearly ignored by the workspace
# makefiles, prepending nonexistent directories to PATH is
# harmless, and we clearly do not wish to override
# ONBLD_TOOLS.
#
# If we're building an ON workspace, then the prepended PATH
# elements should supercede the preexisting ONBLD_TOOLS paths,
# and we want to override ONBLD_TOOLS to catch the tools that
# don't have specific path env vars here.
#
# So the only conditional behavior is overriding ONBLD_TOOLS,
# and we check for "an ON workspace" by looking for
#
export STABS
export CTFSTABS
export GENOFFSETS
CTFCONVERT=${TOOLSROOT}/opt/onbld/bin/${MACH}/ctfconvert
export CTFCONVERT
export CTFMERGE
export CTFCVTPTBL
export CTFFINDMOD
else
fi
export ELFSIGN
export PATH
export ONBLD_TOOLS
fi
}
function staffer {
"$@"
else
arg="\"$1\""
shift
for i
do
done
fi
}
#
# Verify that the closed tree is present if it needs to be.
# Sets CLOSED_IS_PRESENT for future use.
#
function check_closed_tree {
if [ -d $CODEMGR_WS/usr/closed ]; then
CLOSED_IS_PRESENT="yes"
else
CLOSED_IS_PRESENT="no"
fi
export CLOSED_IS_PRESENT
fi
#
# If it's an old (pre-split) tree or an empty
# workspace, don't complain.
#
if grep -s CLOSED_BUILD $SRC/Makefile.master > /dev/null; then
echo "If the closed sources are not present," \
"ON_CLOSED_BINS"
echo "must point to the closed binaries tree."
build_ok=n
exit 1
fi
fi
}
function obsolete_build {
# Warn that the named ($1) build variant is no longer supported;
# the request is silently dropped rather than treated as an error.
printf '%s\n' "WARNING: Obsolete $1 build requested; request will be ignored"
}
#
# wrapper over wsdiff.
# usage: do_wsdiff LABEL OLDPROTO NEWPROTO
#
function do_wsdiff {
label=$1
oldproto=$2
newproto=$3
tee -a $LOGFILE >> $mail_msg_file
wsdiff="wsdiff"
$wsdiff -r ${TMPDIR}/wsdiff.results $oldproto $newproto 2>&1 | \
tee -a $LOGFILE >> $mail_msg_file
}
#
# together.
#
function set_non_debug_build_flags {
# Arrange the Makefile.master environment knobs for a non-DEBUG
# (release) pass: INTERNAL_RELEASE_BUILD and RELEASE_BUILD are both
# exported as empty strings, and any stray compiler extras are cleared.
INTERNAL_RELEASE_BUILD=
RELEASE_BUILD=
export INTERNAL_RELEASE_BUILD RELEASE_BUILD
unset EXTRA_OPTIONS EXTRA_CFLAGS
}
function set_debug_build_flags {
# Arrange the Makefile.master environment knobs for a DEBUG pass:
# RELEASE_BUILD must not be set at all, INTERNAL_RELEASE_BUILD stays
# exported empty, and any stray compiler extras are cleared.
INTERNAL_RELEASE_BUILD=
export INTERNAL_RELEASE_BUILD
unset RELEASE_BUILD EXTRA_OPTIONS EXTRA_CFLAGS
}
if [ "$OPTHOME" = "" ]; then
export OPTHOME
fi
if [ "$TEAMWARE" = "" ]; then
export TEAMWARE
fi
USAGE='Usage: nightly [-in] [-V VERS ] [ -S E|D|H|O ] <env_file>
Where:
-i Fast incremental options (no clobber, lint, check)
-n Do not do a bringover
-V VERS set the build version string to VERS
-S Build a variant of the source product
E - build exportable source
D - build domestic source (exportable + crypt)
H - build hybrid source (binaries + deleted source)
O - build (only) open source
<env_file> file in Bourne shell syntax that sets and exports
variables that configure the operation of this script and many of
the scripts this one calls. If <env_file> does not exist,
non-DEBUG is the default build type. Build options can be set in the
NIGHTLY_OPTIONS variable in the <env_file> as follows:
-A check for ABI differences in .so files
-D do a build with DEBUG on
-F do _not_ do a non-DEBUG build
-G gate keeper default group of options (-au)
-I integration engineer default group of options (-ampu)
-M do not run pmodes (safe file permission checker)
-N do not run protocmp
-O generate OpenSolaris deliverables
-R default group of options for building a release (-mp)
-U update proto area in the parent
-V VERS set the build version string to VERS
-X copy x86 IHV proto area
-a create cpio archives
-f find unreferenced files
-i do an incremental build (no "make clobber")
-l do "make lint" in $LINTDIRS (default: $SRC y)
-m send mail to $MAILTO at end of build
-n do not do a bringover
-p create packages
-r check ELF runtime attributes in the proto area
-u update proto_list_$MACH and friends in the parent workspace;
when used with -f, also build an unrefmaster.out in the parent
-w report on differences between previous and current proto areas
-z compress cpio archives with gzip
-W Do not report warnings (freeware gate ONLY)
-S Build a variant of the source product
E - build exportable source
D - build domestic source (exportable + crypt)
H - build hybrid source (binaries + deleted source)
O - build (only) open source
'
#
# -x less public handling of xmod source for the source product
#
# A log file will be generated under the name $LOGFILE
# for partially completed build and log.`date '+%F'`
# in the same directory for fully completed builds.
#
# default values for low-level FLAGS; G I R are group FLAGS
A_FLAG=n
a_FLAG=n
C_FLAG=n
D_FLAG=n
F_FLAG=n
f_FLAG=n
i_FLAG=n; i_CMD_LINE_FLAG=n
l_FLAG=n
M_FLAG=n
m_FLAG=n
N_FLAG=n
n_FLAG=n
O_FLAG=n
o_FLAG=n
P_FLAG=n
p_FLAG=n
r_FLAG=n
T_FLAG=n
t_FLAG=y
U_FLAG=n
u_FLAG=n
V_FLAG=n
W_FLAG=n
w_FLAG=n
X_FLAG=n
z_FLAG=n
SD_FLAG=n
SE_FLAG=n
SH_FLAG=n
SO_FLAG=n
#
#
build_ok=y
function is_source_build {
return $?
}
#
# examine arguments
#
#
# single function for setting -S flag and doing error checking.
# usage: set_S_flag <type>
# where <type> is the source build type ("E", "D", ...).
#
function set_S_flag {
# Record the -S source-build variant given in $1 (E, D, H or O).
# Only one variant may be selected per run; selecting a second one,
# or an unrecognized letter, prints an error/usage and exits.
if is_source_build; then
echo "Can only build one source variant at a time."
exit 1
fi
case "$1" in
E)	SE_FLAG=y
;;
D)	SD_FLAG=y
;;
H)	SH_FLAG=y
;;
O)	SO_FLAG=y
;;
*)	echo "$USAGE"
exit 1
;;
esac
}
OPTIND=1
do
i ) i_FLAG=y; i_CMD_LINE_FLAG=y
;;
n ) n_FLAG=y
;;
S )
;;
+t ) t_FLAG=n
;;
V ) V_FLAG=y
;;
\? ) echo "$USAGE"
exit 1
;;
esac
done
# correct argument count after options
# test that the path to the environment-setting file was given
if [ $# -ne 1 ]; then
echo "$USAGE"
exit 1
fi
# check if user is running nightly as root
# ISUSER is set non-zero if an ordinary user runs nightly, or is zero
# when root invokes nightly.
#
# force locale to C
LC_COLLATE=C; export LC_COLLATE
LC_MESSAGES=C; export LC_MESSAGES
LC_MONETARY=C; export LC_MONETARY
LC_NUMERIC=C; export LC_NUMERIC
# clear environment variables we know to be bad for the build
unset LD_OPTIONS
unset CONFIG
unset GROUP
unset OWNER
unset REMOTE
unset ENV
unset ARCH
unset CLASSPATH
unset NAME
#
# To get ONBLD_TOOLS from the environment, it must come from the env file.
# If it comes interactively, it is generally TOOLS_PROTO, which will be
# clobbered before the compiler version checks, which will therefore fail.
#
unset ONBLD_TOOLS
#
# Setup environmental variables
#
if [ -f /etc/nightly.conf ]; then
. /etc/nightly.conf
fi
if [ -f $1 ]; then
if [[ $1 = */* ]]; then
. $1
else
. ./$1
fi
else
else
exit 1
fi
fi
# contents of stdenv.sh inserted after next line:
# STDENV_START
# STDENV_END
#
# place ourselves in a new task, respecting BUILD_PROJECT if set.
#
if [ -z "$BUILD_PROJECT" ]; then
else
fi
#
# See if NIGHTLY_OPTIONS is set
#
if [ "$NIGHTLY_OPTIONS" = "" ]; then
NIGHTLY_OPTIONS="-aBm"
fi
#
# If BRINGOVER_WS was not specified, let it default to CLONE_WS
#
if [ "$BRINGOVER_WS" = "" ]; then
fi
#
# If CLOSED_BRINGOVER_WS was not specified, let it default to CLOSED_CLONE_WS
#
if [ "$CLOSED_BRINGOVER_WS" = "" ]; then
fi
#
# If BRINGOVER_FILES was not specified, default to usr
#
if [ "$BRINGOVER_FILES" = "" ]; then
BRINGOVER_FILES="usr"
fi
#
# If the closed sources are not present, the closed binaries must be
# present for the build to succeed. If there's no pointer to the
# closed binaries, flag that now, rather than forcing the user to wait
# a couple hours (or more) to find out.
#
#
# Note: changes to the option letters here should also be applied to the
# bldenv script. `d' is listed for backward compatibility.
#
OPTIND=1
do
A ) A_FLAG=y
#
# If ELF_DATA_BASELINE_DIR is not defined, and we are on SWAN
# (based on CLOSED_IS_PRESENT), then refuse to run. The value
# of ELF version checking is greatly enhanced by including
# the baseline gate comparison.
if [ "$CLOSED_IS_PRESENT" = 'yes' -a \
"$ELF_DATA_BASELINE_DIR" = '' ]; then
echo "ELF_DATA_BASELINE_DIR must be set if the A" \
"flag is present in\nNIGHTLY_OPTIONS and closed" \
"sources are present. Update environment file."
exit 1;
fi
;;
a ) a_FLAG=y
;;
B ) D_FLAG=y
;; # old version of D
C ) C_FLAG=y
;;
D ) D_FLAG=y
;;
F ) F_FLAG=y
;;
f ) f_FLAG=y
;;
G ) a_FLAG=y
u_FLAG=y
;;
I ) a_FLAG=y
m_FLAG=y
p_FLAG=y
u_FLAG=y
;;
i ) i_FLAG=y
;;
l ) l_FLAG=y
;;
M ) M_FLAG=y
;;
m ) m_FLAG=y
;;
N ) N_FLAG=y
;;
n ) n_FLAG=y
;;
O ) O_FLAG=y
;;
o ) o_FLAG=y
;;
P ) P_FLAG=y
;; # obsolete
p ) p_FLAG=y
;;
R ) m_FLAG=y
p_FLAG=y
;;
r ) r_FLAG=y
;;
S )
;;
T ) T_FLAG=y
;; # obsolete
+t ) t_FLAG=n
;;
U ) if [ -z "${PARENT_ROOT}" ]; then
echo "PARENT_ROOT must be set if the U flag is" \
"present in NIGHTLY_OPTIONS."
exit 1
fi
if [ -n "${PARENT_TOOLS_ROOT}" ]; then
fi
U_FLAG=y
;;
u ) u_FLAG=y
;;
W ) W_FLAG=y
;;
w ) w_FLAG=y
;;
X ) # now that we no longer need realmode builds, just
# copy IHV packages. only meaningful on x86.
if [ "$MACH" = "i386" ]; then
X_FLAG=y
fi
;;
x ) XMOD_OPT="-x"
;;
z ) z_FLAG=y
;;
\? ) echo "$USAGE"
exit 1
;;
esac
done
if [ "$o_FLAG" = "y" ]; then
echo "Old-style build requires root permission."
exit 1
fi
# Set default value for STAFFER, if needed.
export STAFFER
fi
fi
export MAILTO
fi
export PATH
# roots of source trees, both relative to $SRC and absolute.
relsrcdirs="."
relsrcdirs="$relsrcdirs ../closed"
fi
abssrcdirs=""
for d in $relsrcdirs; do
abssrcdirs="$abssrcdirs $SRC/$d"
done
unset CH
if [ "$o_FLAG" = "y" ]; then
# root invoked old-style build -- make sure it works as it always has
# by exporting 'CH'. The current Makefile.master doesn't use this, but
# the old ones still do.
CH=
export CH
else
PROTOCMPTERSE="protocmp.terse -gu"
fi
POUND_SIGN="#"
# have we set RELEASE_DATE in our env file?
if [ -z "$RELEASE_DATE" ]; then
fi
BUILD_DATE=$(LC_ALL=C date +%Y-%b-%d)
DEV_CM="\"@(#)SunOS Internal Development: $LOGNAME $BUILD_DATE [$BASEWSDIR]\""
# we export POUND_SIGN, RELEASE_DATE and DEV_CM to speed up the build process
# by avoiding repeated shell invocations to evaluate Makefile.master definitions.
maketype="distributed"
# get the dmake version string alone
DMAKE_VERSION=$( $MAKE -v )
# focus in on just the dotted version number alone
DMAKE_MAJOR=$( echo $DMAKE_VERSION | \
# extract the second (or final) integer
DMAKE_MINOR=${DMAKE_MINOR%%.*}
# extract the first integer
DMAKE_MAJOR=${DMAKE_MAJOR%%.*}
CHECK_DMAKE=${CHECK_DMAKE:-y}
# x86 was built on the 12th, sparc on the 13th.
if [ "$CHECK_DMAKE" = "y" -a \
"$DMAKE_VERSION" != "Sun Distributed Make 7.3 2003/03/12" -a \
"$DMAKE_VERSION" != "Sun Distributed Make 7.3 2003/03/13" -a \( \
if [ -z "$DMAKE_VERSION" ]; then
echo "$MAKE is missing."
exit 1
fi
echo `whence $MAKE`" version is:"
echo " ${DMAKE_VERSION}"
cat <<EOF
This version may not be safe for use. Either set TEAMWARE to a better
path or (if you really want to use this version of dmake anyway), add
the following to your environment to disable this check:
CHECK_DMAKE=n
EOF
exit 1
fi
export PATH
export MAKE
#
# Make sure the crypto tarball is available if it's needed.
#
# Echo the non-DEBUG name corresponding to the given crypto tarball path.
function ndcrypto {
if [ -z "$1" ]; then
echo ""
return
fi
}
# Return 0 (success) if the required crypto tarball(s) are present.
function crypto_is_present {
echo "ON_CRYPTO_BINS is null or not set."
return 1
fi
echo "DEBUG crypto tarball is unavailable."
return 1
fi
fi
echo "Non-DEBUG crypto tarball is unavailable."
return 1
fi
fi
return 0
}
#
# Canonicalize ON_CRYPTO_BINS, just in case it was set to the -nd
# tarball.
#
if [ -n "$ON_CRYPTO_BINS" ]; then
fi
if [[ "$O_FLAG" = y && -z "$CODESIGN_USER" ]]; then
if ! crypto_is_present; then
echo "OpenSolaris deliveries need signed crypto."
exit 1
fi
fi
if [[ "$O_FLAG" = y ]]; then
export TONICBUILD=""
else
export TONICBUILD="#"
fi
if [ "${SUNWSPRO}" != "" ]; then
export PATH
fi
then
if [[ -f $HOME/.make.machines ]]
then
# Note: there is a hard tab and space character in the []s
# below.
maxjobs=${jobs##*=}
fi
then
# default
maxjobs=4
fi
export DMAKE_MAX_JOBS=$maxjobs
fi
export DMAKE_MODE
if [ -z "${ROOT}" ]; then
echo "ROOT must be set."
exit 1
fi
#
# if -V flag was given, reset VERSION to V_ARG
#
if [ "$V_FLAG" = "y" ]; then
fi
#
# Check for IHV root for copying ihv proto area
#
if [ "$X_FLAG" = "y" ]; then
if [ "$IA32_IHV_ROOT" = "" ]; then
echo "IA32_IHV_ROOT: must be set for copying ihv proto"
args_ok=n
fi
if [ ! -d "$IA32_IHV_ROOT" ]; then
echo "$IA32_IHV_ROOT: not found"
args_ok=n
fi
if [ "$IA32_IHV_WS" = "" ]; then
echo "IA32_IHV_WS: must be set for copying ihv proto"
args_ok=n
fi
if [ ! -d "$IA32_IHV_WS" ]; then
echo "$IA32_IHV_WS: not found"
args_ok=n
fi
fi
# Append source version
if [ "$SE_FLAG" = "y" ]; then
VERSION="${VERSION}:EXPORT"
fi
if [ "$SD_FLAG" = "y" ]; then
VERSION="${VERSION}:DOMESTIC"
fi
if [ "$SH_FLAG" = "y" ]; then
VERSION="${VERSION}:MODIFIED_SOURCE_PRODUCT"
fi
if [ "$SO_FLAG" = "y" ]; then
VERSION="${VERSION}:OPEN_ONLY"
fi
TMPDIR="/tmp/nightly.tmpdir.$$"
export TMPDIR
#
# Keep elfsign's use of pkcs11_softtoken from looking in the user home
# directory, which doesn't always work. Needed until all build machines
# have the fix for 6271754
#
export SOFTTOKEN_DIR
#
# Tools should only be built non-DEBUG. Keep track of the tools proto
# area path relative to $TOOLS, because the latter changes in an
# export build.
#
# overridden on the $MAKE command line in build_tools().
#
# create directories that are automatically removed if the nightly script
# fails to start correctly
function newdir {
dir=$1
while [ ! -d $dir ]; do
done
torm=
else
return 1
fi
done
return 0
}
# since this script assumes the build is from full source, it nullifies
# variables likely to have been set by a "ws" script; nullification
# confines the search space for headers and libraries to the proto area
# built from this immediate source.
#
# Juggle the logs and optionally send mail on completion.
#
function logshuffle {
fi
export LLOG
if [ -f $ATLOG/proto_list_tools_${MACH} ]; then
fi
if [ -f $TMPDIR/wsdiff.results ]; then
fi
if [ -f $TMPDIR/wsdiff-nd.results ]; then
fi
fi
#
# Now that we're about to send mail, it's time to check the noise
# file. In the event that an error occurs beyond this point, it will
# be recorded in the nightly.log file, but nowhere else. This would
# include only errors that cause the copying of the noise log to fail
# or the mail itself not to be sent.
#
exec >>$LOGFILE 2>&1
if [ -s $build_noise_file ]; then
echo "\n==== Nightly build noise ====\n" |
tee -a $LOGFILE >>$mail_msg_file
echo | tee -a $LOGFILE >>$mail_msg_file
fi
y)
;;
i)
;;
*)
;;
esac
export NIGHTLY_STATUS
${MAILTO}
fi
fi
}
#
# Remove the locks and temporary files on any exit
#
function cleanup {
set -- $newdirlist
while [ $# -gt 0 ]; do
shift; shift
done
}
function cleanup_signal {
build_ok=i
# this will trigger cleanup(), above.
exit 1
}
trap cleanup 0
#
# Generic lock file processing -- make sure that the lock file doesn't
# exist. If it does, it should name the build host and PID. If it
# doesn't, then make sure we can create it. Clean up locks that are
# known to be stale (assumes host name is unique among build systems
# for the workspace).
#
function create_lock {
lockf=$1
lockvar=$2
exit 1
exit 1
else
# stale lock; clear it out and try again
fi
done
}
#
# Return the list of interesting proto areas, depending on the current
# options.
#
function allprotos {
[ $MULTI_PROTO = yes ] && roots="$roots $ROOT-nd"
fi
if [[ $O_FLAG = y ]]; then
[ $MULTI_PROTO = yes ] && roots="$roots $ROOT-nd-closed"
fi
echo $roots
}
# Ensure no other instance of this script is running on this host.
# LOCKNAME can be set in <env_file>, and is by default, but is not
# required due to the use of $ATLOG below.
if [ -n "$LOCKNAME" ]; then
fi
#
# Create from one, two, or three other locks:
# - protects against multiple builds in same workspace
# - protects against multiple 'u' copy-backs
# - protects against multiple 'U' copy-backs
#
# Overriding ISUSER to 1 causes the lock to be created as root if the
# script is run as root. The default is to create it as $STAFFER.
if [ "$u_FLAG" = "y" ]; then
fi
if [ "$U_FLAG" = "y" ]; then
# NIGHTLY_PARENT_ROOT is written as root if script invoked as root.
fi
# Locks have been taken, so we're doing a build and we're committed to
# the directories we may have created so far.
#
# Create mail_msg_file
#
mail_msg_file="${TMPDIR}/mail_msg"
build_time_file="${TMPDIR}/build_time"
build_environ_file="${TMPDIR}/build_environ"
#
# Move old LOGFILE aside
# ATLOG directory already made by 'create_lock' above
#
if [ -f $LOGFILE ]; then
fi
#
# Build OsNet source
#
SECONDS=0
echo "\n==== Nightly $maketype build started: $START_DATE ====" \
echo "\nBuild project: $build_project\nBuild taskid: $build_taskid" | \
# make sure we log only to the nightly build file
build_noise_file="${TMPDIR}/build_noise"
echo "\n==== list of environment variables ====\n" >> $LOGFILE
if [ "$P_FLAG" = "y" ]; then
fi
if [ "$T_FLAG" = "y" ]; then
fi
if is_source_build; then
echo "WARNING: the -S flags do not support incremental" \
i_FLAG=n
fi
if [ "$N_FLAG" = "n" ]; then
echo "WARNING: the -S flags do not support protocmp;" \
"protocmp disabled\n" | \
N_FLAG=y
fi
if [ "$l_FLAG" = "y" ]; then
echo "WARNING: the -S flags do not support lint;" \
l_FLAG=n
fi
if [ "$C_FLAG" = "y" ]; then
echo "WARNING: the -S flags do not support cstyle;" \
C_FLAG=n
fi
else
if [ "$N_FLAG" = "y" ]; then
if [ "$p_FLAG" = "y" ]; then
cat <<EOF | tee -a $mail_msg_file >> $LOGFILE
WARNING: the p option (create packages) is set, but so is the N option (do
not run protocmp); this is dangerous; you should unset the N option
EOF
else
cat <<EOF | tee -a $mail_msg_file >> $LOGFILE
Warning: the N option (do not run protocmp) is set; it probably shouldn't be
EOF
fi
fi
fi
echo "WARNING: OpenSolaris deliveries (-O) require archives;" \
a_FLAG=y
fi
echo "WARNING: Neither DEBUG nor non-DEBUG build requested, but the" \
fi
#
# In the past we just complained but went ahead with the lint
# pass, even though the proto area was built non-DEBUG. It's
# unlikely that non-DEBUG headers will make a difference, but
# rather than assuming it's a safe combination, force the user
# to specify a DEBUG build.
#
echo "WARNING: DEBUG build not requested; disabling lint.\n" \
l_FLAG=n
fi
if [ "$f_FLAG" = "y" ]; then
if [ "$i_FLAG" = "y" ]; then
echo "WARNING: the -f flag cannot be used during incremental" \
f_FLAG=n
fi
if [ "${l_FLAG}${p_FLAG}" != "yy" ]; then
echo "WARNING: the -f flag requires -l, and -p;" \
f_FLAG=n
fi
fi
echo "WARNING: -w specified, but $ROOT does not exist;" \
w_FLAG=n
fi
if [ "$t_FLAG" = "n" ]; then
#
# We're not doing a tools build, so make sure elfsign(1) is
# new enough to safely sign non-crypto binaries. We test
# debugging output from elfsign to detect the old version.
#
-e /usr/lib/security/pkcs11_softtoken.so.1 2>&1 \
if [ -z "$newelfsigntest" ]; then
"will only sign crypto modules\n" | \
export ELFSIGN_OBJECT=true
elif [ "$VERIFY_ELFSIGN" = "y" ]; then
echo "WARNING: VERIFY_ELFSIGN=y requires" \
"the -t flag; ignoring VERIFY_ELFSIGN\n" | \
fi
fi
case $MULTI_PROTO in
*)
echo "WARNING: MULTI_PROTO is \"$MULTI_PROTO\"; " \
echo "Setting MULTI_PROTO to \"no\".\n" | \
export MULTI_PROTO=no
;;
esac
# If CODESIGN_USER is set, we'll want the crypto that we just built.
if [[ -n "$CODESIGN_USER" && -n "$ON_CRYPTO_BINS" ]]; then
echo "Clearing ON_CRYPTO_BINS for signing build." >> "$LOGFILE"
unset ON_CRYPTO_BINS
fi
# Save the current proto area if we're comparing against the last build
if [ -d "$ROOT.prev" ]; then
fi
fi
# Same for non-DEBUG proto area
fi
fi
# Echo the SCM types of $CODEMGR_WS and $BRINGOVER_WS
function wstypes {
typeset parent_type child_type junk
| read parent_type junk
# Probe BRINGOVER_WS to determine its type
if [[ $BRINGOVER_WS == svn*://* ]]; then
parent_type="subversion"
elif [[ $BRINGOVER_WS == file://* ]] &&
${BRINGOVER_WS#file://}/README.txt 2> /dev/null; then
parent_type="subversion"
elif [[ $BRINGOVER_WS == ssh://* ]]; then
parent_type="mercurial"
elif svn info $BRINGOVER_WS > /dev/null 2>&1; then
parent_type="subversion"
elif [[ $BRINGOVER_WS == http://* ]] && \
parent_type="mercurial"
else
parent_type="none"
fi
fi
# Probe CODEMGR_WS to determine its type
if [[ -d $CODEMGR_WS ]]; then
$WHICH_SCM | read child_type junk || exit 1
fi
# fold both unsupported and unrecognized results into "none"
;;
*) parent_type=none
;;
esac
;;
*) child_type=none
;;
esac
echo $child_type $parent_type
}
export SCM_TYPE PARENT_SCM_TYPE
#
# Decide whether to clobber
#
cd $SRC
# remove old clobber file
# Remove all .make.state* files, just in case we are restarting
# the build after having interrupted a previous 'make clobber'.
echo "\n==== Make clobber ERRORS ====\n" >> $mail_msg_file
egrep -v "Ignoring unknown host" \
cd ${TOOLS}
echo "\n==== Make tools clobber ERRORS ====\n" \
mkdir -p ${TOOLS_PROTO}
fi
# Get back to a clean workspace as much as possible to catch
# problems that only occur on fresh workspaces.
# Remove all .make.state* files, libraries, and .o's that may
# have been omitted from clobber. A couple of libraries are
# under source code control, so leave them alone.
# We should probably blow away temporary directories too.
cd $SRC
-name '*.o' \) -print | \
else
fi
# sleep on the parent workspace's lock
while egrep -s write $BRINGOVER_WS/Codemgr_wsdata/locks
do
sleep 120
done
if [[ -z $BRINGOVER ]]; then
fi
-w $CODEMGR_WS $BRINGOVER_FILES < /dev/null 2>&1 ||
if [ -s $TMPDIR/bringovercheck.out ]; then
echo "\n==== POST-BRINGOVER CLEANUP NOISE ====\n"
fi
}
# If the repository doesn't exist yet, then we want to populate it.
if [[ ! -d $CODEMGR_WS/.hg ]]; then
fi
#
# If the user set CLOSED_BRINGOVER_WS and didn't set CLOSED_IS_PRESENT
# to "no," then we'll want to initialise the closed repository
#
# We use $orig_closed_is_present instead of $CLOSED_IS_PRESENT,
# because for newly-created source trees, the latter will be "no"
# until after the bringover completes.
#
! -d $CODEMGR_WS/usr/closed/.hg ]]; then
export CLOSED_IS_PRESENT=yes
fi
#
# If the user has changes, regardless of whether those changes are
# committed, and regardless of whether those changes conflict, then
# we'll attempt to merge them either implicitly (uncommitted) or
# explicitly (committed).
#
# These are the messages we'll use to help clarify mercurial output
# in those cases.
#
typeset mergefailmsg="\
***\n\
*** nightly was unable to automatically merge your changes. You should\n\
*** redo the full merge manually, following the steps outlined by mercurial\n\
*** above, then restart nightly.\n\
***\n"
typeset mergepassmsg="\
***\n\
*** nightly successfully merged your changes. This means that your working\n\
*** directory has been updated, but those changes are not yet committed.\n\
*** After nightly completes, you should validate the results of the merge,\n\
*** then use hg commit manually.\n\
***\n"
#
# For each repository in turn:
#
# 1. Do the pull. If this fails, dump the output and bail out.
#
# 2. If the pull resulted in an extra head, do an explicit merge.
# If this fails, dump the output and bail out.
#
# Because we can't rely on Mercurial to exit with a failure code
# when a merge fails (Mercurial issue #186), we must grep the
# output of the merge to detect failure.
#
# 3. If a merge failed, set the message and fail the bringover.
#
# 4. Otherwise, if a merge succeeded, set the message
#
# 5. Dump the output, and any message from step 3 or 4.
#
typeset HG_SOURCE=$BRINGOVER_WS
if [ ! -f $TMPDIR/new_repository ]; then
staffer hg --cwd $CODEMGR_WS incoming --bundle $HG_SOURCE \
#
# If there are no incoming changesets, then incoming will
# fail, and there will be no bundle file. Reset the source,
# to allow the remaining logic to complete with no false
# negatives. (Unlike incoming, pull will return success
# for the no-change case.)
#
if (( $? != 0 )); then
fi
fi
staffer hg --cwd $CODEMGR_WS pull -u $HG_SOURCE \
> $TMPDIR/pull_open.out 2>&1
if (( $? != 0 )); then
printf "%s: pull failed as follows:\n\n" "$CODEMGR_WS"
fi
return
fi
staffer hg --cwd $CODEMGR_WS merge \
>> $TMPDIR/pull_open.out 2>&1
if (( $? != 0 )); then
fi
return
fi
fi
printf "updated %s with the following results:\n" "$CODEMGR_WS"
fi
printf "\n"
#
# Pull the closed tree as well, unless we have been explicitly
# told not to via $CLOSED_IS_PRESENT, and we actually know where to
# pull from ($CLOSED_BRINGOVER_WS).
#
if [[ $CLOSED_IS_PRESENT = yes && \
-d $CODEMGR_WS/usr/closed/.hg && \
-n $CLOSED_BRINGOVER_WS ]]; then
if [ ! -f $TMPDIR/new_closed ]; then
--bundle $HG_SOURCE -v $CLOSED_BRINGOVER_WS \
#
# If there are no incoming changesets, then incoming will
# fail, and there will be no bundle file. Reset the source,
# to allow the remaining logic to complete with no false
# negatives. (Unlike incoming, pull will return success
# for the no-change case.)
#
if (( $? != 0 )); then
fi
fi
$HG_SOURCE > $TMPDIR/pull_closed.out 2>&1
if (( $? != 0 )); then
printf "closed pull failed as follows:\n\n"
fi
return
fi
>> $TMPDIR/pull_closed.out 2>&1
if (( $? != 0 )); then
printf "closed merge failed as follows:\n\n"
fi
return
fi
fi
fi
fi
#
# Per-changeset output is neither useful nor manageable for a
# newly-created repository.
#
if [ -f $TMPDIR/new_repository ]; then
return
fi
printf "\nadded the following changesets to open repository:\n"
#
# The closed repository could have been newly created, even though
# the open one previously existed...
#
if [ -f $TMPDIR/new_closed ]; then
return
fi
if [ -f $TMPDIR/incoming_closed.out ]; then
printf "\nadded the following changesets to closed repository:\n"
fi
}
if [[ ! -d $CODEMGR_WS/.svn ]]; then
else
typeset root
root=$(staffer svn info $CODEMGR_WS |
if [[ $root != $BRINGOVER_WS ]]; then
# We fail here because there's no way to update
# from a named repo.
cat <<-EOF
\$BRINGOVER_WS doesn't match repository root:
\$BRINGOVER_WS: $BRINGOVER_WS
Repository root: $root
EOF
touch $TMPDIR/bringover_failed
else
# If a conflict happens, svn still exits 0.
staffer svn update $CODEMGR_WS | tee $TMPDIR/pull.out ||
touch $TMPDIR/bringover_failed
if grep "^C" $TMPDIR/pull.out > /dev/null 2>&1; then
touch $TMPDIR/bringover_failed
fi
fi
fi
}
# Fallback bringover for an unrecognized SCM type: report the problem
# and mark the bringover as failed via the flag file that the caller
# checks.  Defined only if one has not already been supplied.
type bringover_none > /dev/null 2>&1 || function bringover_none {
	printf '%s\n' \
	    "Couldn't figure out what kind of SCM to use for $BRINGOVER_WS."
	touch $TMPDIR/bringover_failed
}
# Parse the URL.
# The other way to deal with empty components is to echo a string that can
# be eval'ed by the caller to associate values (possibly empty) with
# variables. In that case, passing in a printf string would let the caller
# choose the variable names.
# Parse a URL of the form method://host[:port][/path] and echo its
# components as "method host path port", for capture with:
#	parse_url $url | read method host path port
#
# method can never be empty.  host can only be empty if method is
# file, and that implies it's localhost.  path defaults to / if it's
# otherwise empty, leaving port as the only component without a
# default, so it has to go last.
function parse_url {
	typeset url method host port path hostpart
	url=$1
	method=${url%%://*}
	hostpart=${url#$method://}
	#
	# A URL with no trailing slash (e.g. http://host:port) has no
	# path component at all; the old "${hostpart#*/}" left the
	# whole host string in $path in that case.
	#
	if [[ $hostpart == */* ]]; then
		path=${hostpart#*/}
	else
		path=
	fi
	host=${hostpart%%/*}
	if [[ $host == *:* ]]; then
		port=${host#*:}
		host=${host%:*}
	fi
	echo $method ${host:-localhost} ${path:-/} $port
}
# Fetch a URL with a raw HTTP/1.0 GET over mconnect(1), honoring
# $http_proxy if set.  The raw response (headers included) goes to
# stdout.
function http_get {
typeset url method host port path
url=$1
if [[ -n $http_proxy ]]; then
# When proxying, connect to the proxy's host/port and send the
# full URL in the request line.
# NOTE(review): "| read" relies on ksh running the last pipeline
# stage in the current shell; this would not work under bash.
parse_url $http_proxy | read method host path port
# ksh's echo expands "\r\n" to a literal CRLF terminator.
echo "GET $url HTTP/1.0\r\n" |
mconnect -p ${port:-8080} $host
else
# Direct connection: request just the path from the target host.
parse_url $url | read method host path port
echo "GET $path HTTP/1.0\r\n" |
mconnect -p ${port:-80} $host
fi
}
#
# Decide whether to bringover to the codemgr workspace
#
if [ "$n_FLAG" = "n" ]; then
# A bringover across different SCM types is not supported; fail
# before touching the workspace.
if [[ $SCM_TYPE != none && $SCM_TYPE != $PARENT_SCM_TYPE ]]; then
echo "cannot bringover from $PARENT_SCM_TYPE to $SCM_TYPE, " \
"quitting at `date`." | tee -a $mail_msg_file >> $LOGFILE
exit 1
fi
run_hook PRE_BRINGOVER
echo "\n==== bringover to $CODEMGR_WS at `date` ====\n" >> $LOGFILE
echo "\n==== BRINGOVER LOG ====\n" >> $mail_msg_file
# Dispatch to the bringover_<scm> function matching the parent.
eval "bringover_${PARENT_SCM_TYPE}" 2>&1 |
tee -a $mail_msg_file >> $LOGFILE
# The bringover functions run inside the pipeline above, so they
# report failure through this flag file, not an exit status.
if [ -f $TMPDIR/bringover_failed ]; then
rm -f $TMPDIR/bringover_failed
build_ok=n
echo "trouble with bringover, quitting at `date`." |
tee -a $mail_msg_file >> $LOGFILE
exit 1
fi
#
# It's possible that we used the bringover above to create
# $CODEMGR_WS. If so, then SCM_TYPE was previously "none,"
# but should now be the same as $BRINGOVER_WS.
#
[[ $SCM_TYPE = none ]] && SCM_TYPE=$PARENT_SCM_TYPE
run_hook POST_BRINGOVER
#
# Possible transition from pre-split workspace to split
# workspace. See if the bringover changed anything.
#
CLOSED_IS_PRESENT="$orig_closed_is_present"
check_closed_tree
else
echo "\n==== No bringover to $CODEMGR_WS ====\n" >> $LOGFILE
fi
# OpenSolaris deliverables (-O) cannot be built without usr/closed.
if [[ "$O_FLAG" = y && "$CLOSED_IS_PRESENT" != "yes" ]]; then
build_ok=n
echo "OpenSolaris binary deliverables need usr/closed." \
| tee -a "$mail_msg_file" >> $LOGFILE
exit 1
fi
if [ "$CLOSED_IS_PRESENT" = no ]; then
#
# Not all consolidations have a closed tree, and even if they
# did, they wouldn't necessarily have signed crypto. But if
# the current source base does have signed crypto and it can't
# be generated, error out, rather than silently building
# unusable binaries.
#
grep -s ELFSIGN_CRYPTO "$SRC/Makefile.master" > /dev/null
if (( $? == 0 )); then
# crypto_is_present returns 0 when a crypto tarball is usable.
crypto_is_present >> "$LOGFILE"
if (( $? != 0 )); then
build_ok=n
echo "A crypto tarball must be provided when" \
"there is no closed tree." |
tee -a "$mail_msg_file" >> "$LOGFILE"
exit 1
fi
fi
fi
echo "\n==== Build environment ====\n" | tee -a $build_environ_file >> $LOGFILE
# System
whence uname | tee -a $build_environ_file >> $LOGFILE
uname -a 2>&1 | tee -a $build_environ_file >> $LOGFILE
echo | tee -a $build_environ_file >> $LOGFILE
# nightly
echo "$0 $@" | tee -a $build_environ_file >> $LOGFILE
if [[ $nightly_path = "/opt/onbld/bin/nightly" ]] &&
#
# XXX This should work with ips legacy pkginfo for now, but will
# fall apart when we stop updating that.
#
pkginfo SUNWonbld > /dev/null 2>&1 ; then
pkginfo -l SUNWonbld | egrep "PKGINST:|VERSION:|PSTAMP:"
else
echo "$nightly_ls"
fi | tee -a $build_environ_file >> $LOGFILE
echo | tee -a $build_environ_file >> $LOGFILE
# make
whence $MAKE | tee -a $build_environ_file >> $LOGFILE
$MAKE -v | tee -a $build_environ_file >> $LOGFILE
echo "number of concurrent jobs = $DMAKE_MAX_JOBS" |
tee -a $build_environ_file >> $LOGFILE
#
# Report the compiler versions.
#
if [[ ! -f $SRC/Makefile ]]; then
build_ok=n
echo "\nUnable to find \"Makefile\" in $SRC." | \
tee -a $build_environ_file >> $LOGFILE
exit 1
fi
( cd $SRC
for target in cc-version cc64-version java-version; do
echo
#
# Put statefile somewhere we know we can write to rather than trip
# over a read-only $srcroot.
#
rm -f $TMPDIR/make-state
export SRC
if $MAKE -K $TMPDIR/make-state -e $target 2>/dev/null; then
continue
fi
touch $TMPDIR/nocompiler
done
echo
) | tee -a $build_environ_file >> $LOGFILE
if [ -f $TMPDIR/nocompiler ]; then
rm -f $TMPDIR/nocompiler
build_ok=n
echo "Aborting due to missing compiler." |
tee -a $build_environ_file >> $LOGFILE
exit 1
fi
# as
whence as | tee -a $build_environ_file >> $LOGFILE
as -V 2>&1 | head -1 | tee -a $build_environ_file >> $LOGFILE
echo | tee -a $build_environ_file >> $LOGFILE
# Check that we're running a capable link-editor
whence ld | tee -a $build_environ_file >> $LOGFILE
LDVER=`ld -V 2>&1`
echo $LDVER | tee -a $build_environ_file >> $LOGFILE
LDVER=`echo $LDVER | sed -e "s/.*-1\.//" -e "s/:.*//"`
if [ `expr $LDVER \< 422` -eq 1 ]; then
echo "The link-editor needs to be at version 422 or higher to build" | \
tee -a $build_environ_file >> $LOGFILE
echo "the latest stuff. Hope your build works." | \
tee -a $build_environ_file >> $LOGFILE
fi
#
# Build and use the workspace's tools if requested
#
if [[ "$t_FLAG" = "y" || "$O_FLAG" = y ]]; then
# Tools are built non-DEBUG regardless of the main build flavor.
set_non_debug_build_flags
build_tools ${TOOLS_PROTO}
# NOTE(review): switching to the freshly built tools only when
# build_tools returned NON-zero looks inverted -- one would expect
# use_tools on success.  Confirm against revision history before
# changing.
if [[ $? != 0 && "$t_FLAG" = y ]]; then
use_tools $TOOLS_PROTO
fi
fi
#
# copy ihv proto area in addition to the build itself
#
if [ "$X_FLAG" = "y" ]; then
copy_ihv_proto
fi
if [ "$i_FLAG" = "y" -a "$SH_FLAG" = "y" ]; then
echo "\n==== NOT Building base OS-Net source ====\n" | \
tee -a $LOGFILE >> $mail_msg_file
else
# timestamp the start of the normal build; the findunref tool uses it.
touch $SRC/.build.tstamp
normal_build
fi
#
# Generate the THIRDPARTYLICENSE files if needed. This is done after
# the build, so that dynamically-created license files are there.
# It's done before findunref to help identify license files that need
# to be added to tools/opensolaris/license-list.
#
if [ "$O_FLAG" = y -a "$build_ok" = y ]; then
echo "\n==== Generating THIRDPARTYLICENSE files ====\n" |
tee -a "$mail_msg_file" >> "$LOGFILE"
mktpl usr/src/tools/opensolaris/license-list >> "$LOGFILE" 2>&1
if (( $? != 0 )) ; then
echo "Couldn't create THIRDPARTYLICENSE files" |
tee -a "$mail_msg_file" >> "$LOGFILE"
fi
fi
ORIG_SRC=$SRC
BINARCHIVE=${CODEMGR_WS}/bin-${MACH}.cpio.Z
if [ "$SE_FLAG" = "y" -o "$SD_FLAG" = "y" -o "$SH_FLAG" = "y" ]; then
save_binaries
fi
# EXPORT_SRC comes after CRYPT_SRC since a domestic build will need
# $SRC pointing to the export_source usr/src.
if [ "$SE_FLAG" = "y" -o "$SD_FLAG" = "y" -o "$SH_FLAG" = "y" ]; then
if [ "$SD_FLAG" = "y" -a $build_ok = y ]; then
set_up_source_build ${CODEMGR_WS} ${CRYPT_SRC} CRYPT_SRC
fi
if [ $build_ok = y ]; then
set_up_source_build ${CODEMGR_WS} ${EXPORT_SRC} EXPORT_SRC
fi
fi
if [ "$SD_FLAG" = "y" -a $build_ok = y ]; then
# drop the crypt files in place.
cd ${EXPORT_SRC}
echo "\nextracting crypt_files.cpio.Z onto export_source.\n" \
>> ${LOGFILE}
zcat ${CODEMGR_WS}/crypt_files.cpio.Z | \
cpio -idmucvB 2>/dev/null >> ${LOGFILE}
if [ "$?" = "0" ]; then
echo "\n==== DOMESTIC extraction succeeded ====\n" \
>> $mail_msg_file
else
echo "\n==== DOMESTIC extraction failed ====\n" \
>> $mail_msg_file
fi
fi
if [ "$SO_FLAG" = "y" -a $build_ok = y ]; then
#
# Copy the open sources into their own tree, set up the closed
# binaries, and set up the environment. The build looks for
# the closed binaries in a location that depends on whether
# it's a DEBUG build, so we might need to make two copies.
#
# If copy_source fails, it will have already generated an
# error message and set build_ok=n, so we don't need to worry
# about that here.
#
copy_source $CODEMGR_WS $OPEN_SRCDIR OPEN_SOURCE usr/src
fi
if [ "$SO_FLAG" = "y" -a $build_ok = y ]; then
echo "\n==== Generating skeleton closed binaries for" \
"open-only build ====\n" | \
tee -a $LOGFILE >> $mail_msg_file
rm -rf $CODEMGR_WS/closed.skel
if [ "$D_FLAG" = y ]; then
mkclosed $MACH $ROOT $CODEMGR_WS/closed.skel/root_$MACH \
>>$LOGFILE 2>&1
if (( $? != 0 )) ; then
echo "Couldn't create skeleton DEBUG closed binaries." |
tee -a $mail_msg_file >> $LOGFILE
fi
fi
if [ "$F_FLAG" = n ]; then
root=$ROOT
[ "$MULTI_PROTO" = yes ] && root=$ROOT-nd
mkclosed $MACH $root $CODEMGR_WS/closed.skel/root_$MACH-nd \
>>$LOGFILE 2>&1
if (( $? != 0 )) ; then
echo "Couldn't create skeleton non-DEBUG closed binaries." |
tee -a $mail_msg_file >> $LOGFILE
fi
fi
SRC=$OPEN_SRCDIR/usr/src
# Try not to clobber any user-provided closed binaries.
export ON_CLOSED_BINS=$CODEMGR_WS/closed.skel
export CLOSED_IS_PRESENT=no
fi
if is_source_build && [ $build_ok = y ] ; then
# remove proto area(s) here, since we don't clobber
rm -rf `allprotos`
if [ "$t_FLAG" = "y" ]; then
set_non_debug_build_flags
ORIG_TOOLS=$TOOLS
#
# SRC was set earlier to point to the source build
# source tree (e.g., $EXPORT_SRC).
#
TOOLS=${SRC}/tools
TOOLS_PROTO=${TOOLS}/${TOOLS_PROTO_REL}; export TOOLS_PROTO
build_tools ${TOOLS_PROTO}
if [[ $? != 0 ]]; then
use_tools ${TOOLS_PROTO}
fi
fi
export EXPORT_RELEASE_BUILD ; EXPORT_RELEASE_BUILD=#
normal_build
fi
if [[ "$SO_FLAG" = "y" && "$build_ok" = "y" ]]; then
rm -rf $ON_CLOSED_BINS
fi
#
# There are several checks that need to look at the proto area, but
# they only need to look at one, and they don't care whether it's
# DEBUG or non-DEBUG.
#
if [[ "$MULTI_PROTO" = yes && "$D_FLAG" = n ]]; then
checkroot=$ROOT-nd
else
checkroot=$ROOT
fi
if [ "$build_ok" = "y" ]; then
echo "\n==== Creating protolist system file at `date` ====" \
>> $LOGFILE
protolist $checkroot > $ATLOG/proto_list_${MACH}
echo "==== protolist system file created at `date` ====\n" \
>> $LOGFILE
if [ "$N_FLAG" != "y" ]; then
E1=
f1=
if [ -d "$SRC/pkgdefs" ]; then
f1="$SRC/pkgdefs/etc/exception_list_$MACH"
if [ "$X_FLAG" = "y" ]; then
f1="$f1 $IA32_IHV_WS/usr/src/pkgdefs/etc/exception_list_$MACH"
fi
fi
for f in $f1; do
if [ -f "$f" ]; then
E1="$E1 -e $f"
fi
done
E2=
f2=
if [ -d "$SRC/pkg" ]; then
f2="$f2 exceptions/packaging"
if [ "$CLOSED_IS_PRESENT" = "no" ]; then
f2="$f2 exceptions/packaging.open"
else
f2="$f2 exceptions/packaging.closed"
fi
fi
for f in $f2; do
if [ -f "$f" ]; then
E2="$E2 -e $f"
fi
done
if [ -f "$REF_PROTO_LIST" ]; then
#
# For builds that copy the IHV proto area (-X), add the
# IHV proto list to the reference list if the reference
# was built without -X.
#
# For builds that don't copy the IHV proto area, add the
# IHV proto list to the build's proto list if the
# reference was built with -X.
#
# Use the presence of the first file entry of the cached
# IHV proto list in the reference list to determine
# whether it was built with -X or not.
#
IHV_REF_PROTO_LIST=$SRC/pkg/proto_list_ihv_$MACH
grepfor=$(nawk '$1 == "f" { print $2; exit }' \
$IHV_REF_PROTO_LIST 2> /dev/null)
if [ $? = 0 -a -n "$grepfor" ]; then
if [ "$X_FLAG" = "y" ]; then
grep -w "$grepfor" \
$REF_PROTO_LIST > /dev/null
if [ ! "$?" = "0" ]; then
REF_IHV_PROTO="-d $IHV_REF_PROTO_LIST"
fi
else
grep -w "$grepfor" \
$REF_PROTO_LIST > /dev/null
if [ "$?" = "0" ]; then
IHV_PROTO_LIST="$IHV_REF_PROTO_LIST"
fi
fi
fi
fi
fi
if [ "$N_FLAG" != "y" -a -f $SRC/pkgdefs/Makefile ]; then
echo "\n==== Impact on SVr4 packages ====\n" >> $mail_msg_file
#
# Compare the build's proto list with current package
# definitions to audit the quality of package
# definitions and makefile install targets. Use the
# current exception list.
#
PKGDEFS_LIST=""
for d in $abssrcdirs; do
if [ -d $d/pkgdefs ]; then
PKGDEFS_LIST="$PKGDEFS_LIST -d $d/pkgdefs"
fi
done
if [ "$X_FLAG" = "y" -a \
-d $IA32_IHV_WS/usr/src/pkgdefs ]; then
PKGDEFS_LIST="$PKGDEFS_LIST -d $IA32_IHV_WS/usr/src/pkgdefs"
fi
$PROTOCMPTERSE \
"Files missing from the proto area:" \
"Files missing from packages:" \
"Inconsistencies between pkgdefs and proto area:" \
${E1} \
${PKGDEFS_LIST} \
$ATLOG/proto_list_${MACH} \
>> $mail_msg_file
fi
if [ "$N_FLAG" != "y" -a -d $SRC/pkg ]; then
echo "\n==== Validating manifests against proto area ====\n" \
>> $mail_msg_file
( cd $SRC/pkg ; $MAKE -e protocmp ROOT="$checkroot" ) \
>> $mail_msg_file
fi
if [ "$N_FLAG" != "y" -a -f "$REF_PROTO_LIST" ]; then
echo "\n==== Impact on proto area ====\n" >> $mail_msg_file
if [ -n "$E2" ]; then
ELIST=$E2
else
ELIST=$E1
fi
$PROTOCMPTERSE \
"Files in yesterday's proto area, but not today's:" \
"Files in today's proto area, but not yesterday's:" \
"Files that changed between yesterday and today:" \
${ELIST} \
-d $REF_PROTO_LIST \
$REF_IHV_PROTO \
$ATLOG/proto_list_${MACH} \
$IHV_PROTO_LIST \
>> $mail_msg_file
fi
fi
if [ "$u_FLAG" = "y" -a "$build_ok" = "y" ]; then
staffer cp $ATLOG/proto_list_${MACH} \
$PARENT_WS/usr/src/proto_list_${MACH}
fi
# Update parent proto area if necessary. This is done now
# so that the proto area has either DEBUG or non-DEBUG kernels.
# Note that this clears out the lock file, so we can dispense with
# the variable now.
if [ "$U_FLAG" = "y" -a "$build_ok" = "y" ]; then
echo "\n==== Copying proto area to $NIGHTLY_PARENT_ROOT ====\n" | \
tee -a $LOGFILE >> $mail_msg_file
rm -rf $NIGHTLY_PARENT_ROOT/*
unset Ulockfile
mkdir -p $NIGHTLY_PARENT_ROOT
if [[ "$MULTI_PROTO" = no || "$D_FLAG" = y ]]; then
( cd $ROOT; tar cf - . |
( cd $NIGHTLY_PARENT_ROOT; umask 0; tar xpf - ) ) 2>&1 |
tee -a $mail_msg_file >> $LOGFILE
fi
if [[ "$MULTI_PROTO" = yes && "$F_FLAG" = n ]]; then
rm -rf $NIGHTLY_PARENT_ROOT-nd/*
mkdir -p $NIGHTLY_PARENT_ROOT-nd
cd $ROOT-nd
( tar cf - . |
( cd $NIGHTLY_PARENT_ROOT-nd; umask 0; tar xpf - ) ) 2>&1 |
tee -a $mail_msg_file >> $LOGFILE
fi
if [ -n "${NIGHTLY_PARENT_TOOLS_ROOT}" ]; then
echo "\n==== Copying tools proto area to $NIGHTLY_PARENT_TOOLS_ROOT ====\n" | \
tee -a $LOGFILE >> $mail_msg_file
rm -rf $NIGHTLY_PARENT_TOOLS_ROOT/*
mkdir -p $NIGHTLY_PARENT_TOOLS_ROOT
if [[ "$MULTI_PROTO" = no || "$D_FLAG" = y ]]; then
( cd $TOOLS_PROTO; tar cf - . |
( cd $NIGHTLY_PARENT_TOOLS_ROOT;
umask 0; tar xpf - ) ) 2>&1 |
tee -a $mail_msg_file >> $LOGFILE
fi
fi
fi
#
# ELF verification: ABI (-A) and runtime (-r) checks
#
if [[ ($build_ok = y) && ( ($A_FLAG = y) || ($r_FLAG = y) ) ]]; then
# Directory ELF-data.$MACH holds the files produced by these tests.
elf_ddir=$SRC/ELF-data.$MACH
# If there is a previous ELF-data backup directory, remove it. Then,
# rotate current ELF-data directory into its place and create a new
# empty directory
rm -rf $elf_ddir.ref
if [[ -d $elf_ddir ]]; then
mv $elf_ddir $elf_ddir.ref
fi
mkdir -p $elf_ddir
# Call find_elf to produce a list of the ELF objects in the proto area.
# This list is passed to check_rtime and interface_check, preventing
# them from separately calling find_elf to do the same work twice.
find_elf -fr $checkroot > $elf_ddir/object_list
if [[ $A_FLAG = y ]]; then
echo "\n==== Check versioning and ABI information ====\n" | \
tee -a $LOGFILE >> $mail_msg_file
# Produce interface description for the proto. Report errors.
interface_check -o -w $elf_ddir -f object_list \
-i interface -E interface.err
if [[ -s $elf_ddir/interface.err ]]; then
tee -a $LOGFILE < $elf_ddir/interface.err \
>> $mail_msg_file
fi
# If ELF_DATA_BASELINE_DIR is defined, compare the new interface
# description file to that from the baseline gate. Issue a
# warning if the baseline is not present, and keep going.
if [[ "$ELF_DATA_BASELINE_DIR" != '' ]]; then
base_ifile="$ELF_DATA_BASELINE_DIR/interface"
echo "\n==== Compare versioning and ABI information" \
"to baseline ====\n" | \
tee -a $LOGFILE >> $mail_msg_file
echo "Baseline: $base_ifile\n" >> $LOGFILE
if [[ -f $base_ifile ]]; then
interface_cmp -d -o $base_ifile \
$elf_ddir/interface > $elf_ddir/interface.cmp
if [[ -s $elf_ddir/interface.cmp ]]; then
echo | tee -a $LOGFILE >> $mail_msg_file
tee -a $LOGFILE < \
$elf_ddir/interface.cmp \
>> $mail_msg_file
fi
else
echo "baseline not available. comparison" \
"skipped" | \
tee -a $LOGFILE >> $mail_msg_file
fi
fi
fi
if [[ $r_FLAG = y ]]; then
	echo "\n==== Check ELF runtime attributes ====\n" | \
	    tee -a $LOGFILE >> $mail_msg_file

	# If we're doing a DEBUG build the proto area will be left
	# with debuggable objects, thus don't assert -s.
	if [[ $D_FLAG = y ]]; then
		rtime_sflag=""
	else
		rtime_sflag="-s"
	fi
	check_rtime -i -m -v $rtime_sflag -o -w $elf_ddir \
	    -D object_list -f object_list -E runtime.err \
	    -I runtime.attr.raw

	# check_rtime -I output needs to be sorted in order to
	# compare it to that from previous builds.
	sort $elf_ddir/runtime.attr.raw > $elf_ddir/runtime.attr
	rm $elf_ddir/runtime.attr.raw

	# Report errors
	if [[ -s $elf_ddir/runtime.err ]]; then
		tee -a $LOGFILE < $elf_ddir/runtime.err \
		    >> $mail_msg_file
	fi

	# If there is an ELF-data directory from a previous build,
	# then diff the attr files. These files contain information
	# about dependencies, versioning, and runpaths. There is some
	# overlap with the ABI checking done above, but this also
	# flushes out non-ABI interface differences along with the
	# other information.
	#
	# (The mail_msg_file redirection was previously duplicated on
	# the tee line; only one append is intended.)
	echo "\n==== Diff ELF runtime attributes" \
	    "(since last build) ====\n" | \
	    tee -a $LOGFILE >> $mail_msg_file

	if [[ -f $elf_ddir.ref/runtime.attr ]]; then
		diff $elf_ddir.ref/runtime.attr \
		    $elf_ddir/runtime.attr \
		    >> $mail_msg_file
	fi
fi
# If -u set, copy contents of ELF-data.$MACH to the parent workspace.
if [[ "$u_FLAG" = "y" ]]; then
p_elf_ddir=$PARENT_WS/usr/src/ELF-data.$MACH
# If parent lacks the ELF-data.$MACH directory, create it
if [[ ! -d $p_elf_ddir ]]; then
staffer mkdir -p $p_elf_ddir
fi
# These files are used asynchronously by other builds for ABI
# verification, as above for the -A option. As such, we require
# the file replacement to be atomic. Copy the data to a temp
# file in the same filesystem and then rename into place.
(
cd $elf_ddir
for elf_dfile in *; do
staffer cp $elf_dfile \
${p_elf_ddir}/${elf_dfile}.new
staffer mv -f ${p_elf_ddir}/${elf_dfile}.new \
${p_elf_ddir}/${elf_dfile}
done
)
fi
fi
# DEBUG lint of kernel begins
if [ "$i_CMD_LINE_FLAG" = "n" -a "$l_FLAG" = "y" ]; then
# LINTDIRS is a list of "directory flag" pairs, where flag y/n
# controls lint noise diff output (see the env-file comment at the
# top of this script); default to linting all of $SRC.
if [ "$LINTDIRS" = "" ]; then
# LINTDIRS="$SRC/uts y $SRC/stand y $SRC/psm y"
LINTDIRS="$SRC y"
fi
# Walk the pairs via the positional parameters, two at a time.
set $LINTDIRS
while [ $# -gt 0 ]; do
dolint $1 $2; shift; shift
done
else
echo "\n==== No '$MAKE lint' ====\n" >> $LOGFILE
fi
# "make check" begins
if [ "$i_CMD_LINE_FLAG" = "n" -a "$C_FLAG" = "y" ]; then
# remove old check.out
rm -f $SRC/check.out
rm -f $SRC/check-${MACH}.out
cd $SRC
# -e: let the environment override makefile macros; -k: keep going
# past errors so the full set of check results is collected.
$MAKE -ek check 2>&1 | tee -a $SRC/check-${MACH}.out >> $LOGFILE
echo "\n==== cstyle/hdrchk errors ====\n" >> $mail_msg_file
# Mail only "file: message" diagnostic lines, de-duplicated.
grep ":" $SRC/check-${MACH}.out |
egrep -v "Ignoring unknown host" | \
sort | uniq >> $mail_msg_file
else
echo "\n==== No '$MAKE check' ====\n" >> $LOGFILE
fi
echo "\n==== Find core files ====\n" | \
tee -a $LOGFILE >> $mail_msg_file
# A core file in the source tree means some tool crashed during the
# build; file(1) output identifies the dumping program.
find $abssrcdirs -name core -a -type f -exec file {} \; | \
tee -a $LOGFILE >> $mail_msg_file
# Unreferenced-file check (-f): findunref uses the .build.tstamp
# timestamp (set just before the build) to find files the build never
# referenced, then we diff against the previous build's list.
if [ "$f_FLAG" = "y" -a "$build_ok" = "y" ]; then
echo "\n==== Diff unreferenced files (since last build) ====\n" \
| tee -a $LOGFILE >>$mail_msg_file
rm -f $SRC/unref-${MACH}.ref
if [ -f $SRC/unref-${MACH}.out ]; then
mv $SRC/unref-${MACH}.out $SRC/unref-${MACH}.ref
fi
findunref -S $SCM_TYPE -t $SRC/.build.tstamp -s usr $CODEMGR_WS \
${TOOLS}/findunref/exception_list 2>> $mail_msg_file | \
sort > $SRC/unref-${MACH}.out
# First run: seed the reference list with the current output so
# the diff below is empty rather than failing.
if [ ! -f $SRC/unref-${MACH}.ref ]; then
cp $SRC/unref-${MACH}.out $SRC/unref-${MACH}.ref
fi
diff $SRC/unref-${MACH}.ref $SRC/unref-${MACH}.out >>$mail_msg_file
fi
#
# Generate the OpenSolaris deliverables if requested. Some of these
# steps need to come after findunref and are commented below.
#
#
# Copy an input crypto tarball to the canonical destination (with
# datestamp), and point the non-stamped symlink at it.
# Usage: copycrypto from_path suffix
# Returns 0 if successful, non-zero if not.
#
# Copy a crypto tarball to the canonical (datestamped) destination
# named by cryptodest for the given suffix, then repoint the
# non-stamped symlink at it.  Returns 0 on success, non-zero if either
# the copy or the relink failed.
function copycrypto {
	typeset src=$1
	typeset sfx=$2
	typeset dest=$(cryptodest "$sfx").bz2
	typeset -i rc

	cp "$src" "$dest"
	rc=$?
	# Only move the convenience symlink if the copy landed.
	if (( rc == 0 )); then
		cryptolink "$dest" "$sfx"
		rc=$?
	fi
	return $rc
}
#
# Pass through the crypto tarball(s) that we were given, putting it in
# the same place that crypto_from_proto puts things.
#
# Reuse a user-supplied crypto tarball instead of one built here,
# placing copies where crypto_from_proto would have put them: the
# DEBUG copy (if this is a DEBUG build) and the non-DEBUG copy (unless
# non-DEBUG was skipped with -F).  Failures are reported but not fatal.
function crypto_passthrough {
	echo "Reusing $ON_CRYPTO_BINS for crypto tarball(s)..." >> "$LOGFILE"

	if [ "$D_FLAG" = y ] && ! copycrypto "$ON_CRYPTO_BINS" "" \
	    >> "$LOGFILE" 2>&1; then
		echo "Couldn't create DEBUG crypto tarball." |
		    tee -a "$mail_msg_file" >> "$LOGFILE"
	fi

	if [ "$F_FLAG" = n ] && ! copycrypto $(ndcrypto "$ON_CRYPTO_BINS") \
	    "-nd" >> "$LOGFILE" 2>&1; then
		echo "Couldn't create non-DEBUG crypto tarball." |
		    tee -a "$mail_msg_file" >> "$LOGFILE"
	fi
}
# If we are doing an OpenSolaris _source_ build (-S O) then we do
# not have usr/closed available to us to generate closedbins from,
# so skip this part.
if [ "$SO_FLAG" = n -a "$O_FLAG" = y -a "$build_ok" = y ]; then
echo "\n==== Generating OpenSolaris tarballs ====\n" | \
tee -a $mail_msg_file >> $LOGFILE
cd $CODEMGR_WS
#
# This step grovels through the package manifests, so it
# must come after findunref.
#
# We assume no DEBUG vs non-DEBUG package content variation
# here; if that changes, then the "make all" in $SRC/pkg will
# need to be moved into the conditionals and repeated for each
# different build.
#
echo "Generating closed binaries tarball(s)..." >> $LOGFILE
closed_basename=on-closed-bins
if [ "$D_FLAG" = y ]; then
bindrop "$closed_basename" >>"$LOGFILE" 2>&1
if (( $? != 0 )) ; then
echo "Couldn't create DEBUG closed binaries." |
tee -a $mail_msg_file >> $LOGFILE
fi
fi
if [ "$F_FLAG" = n ]; then
bindrop -n "$closed_basename-nd" >>"$LOGFILE" 2>&1
if (( $? != 0 )) ; then
echo "Couldn't create non-DEBUG closed binaries." |
tee -a $mail_msg_file >> $LOGFILE
fi
fi
echo "Generating README.opensolaris..." >> $LOGFILE
cat $SRC/tools/opensolaris/README.opensolaris.tmpl | \
mkreadme_osol $CODEMGR_WS/README.opensolaris >> $LOGFILE 2>&1
if (( $? != 0 )) ; then
echo "Couldn't create README.opensolaris." |
tee -a $mail_msg_file >> $LOGFILE
fi
if [ -n "$ON_CRYPTO_BINS" ]; then
crypto_passthrough
fi
fi
# Verify that the usual lists of files, such as exception lists,
# contain only valid references to files. If the build has failed,
# then don't check the proto area.
# CHECK_PATHS may be overridden from the env file; default is on.
CHECK_PATHS=${CHECK_PATHS:-y}
if [ "$CHECK_PATHS" = y -a "$N_FLAG" != y ]; then
echo "\n==== Check lists of files ====\n" | tee -a $LOGFILE \
>>$mail_msg_file
# -b tells checkpaths to skip the proto area when the build failed.
arg=-b
[ "$build_ok" = y ] && arg=
checkpaths $arg $checkroot 2>&1 | tee -a $LOGFILE >>$mail_msg_file
fi
if [ "$M_FLAG" != "y" -a "$build_ok" = y ]; then
	echo "\n==== Impact on file permissions ====\n" \
	    >> $mail_msg_file

	# Collect the source directories that use the old (pkgdefs)
	# and new (pkg) packaging, respectively.
	abspkgdefs=
	abspkg=
	for d in $abssrcdirs; do
		if [ -d "$d/pkgdefs" ]; then
			abspkgdefs="$abspkgdefs $d"
		fi
		if [ -d "$d/pkg" ]; then
			abspkg="$abspkg $d"
		fi
	done

	if [ -n "$abspkgdefs" ]; then
		pmodes -qvdP \
		    `find $abspkgdefs -name pkginfo.tmpl -print -o \
		    -name .del\* -prune | sed -e 's:/pkginfo.tmpl$::' | \
		    sort -u` >> $mail_msg_file
	fi

	if [ -n "$abspkg" ]; then
		# $abspkg is a space-separated list built above, so it
		# must word-split here; quoting it made the loop run
		# once over the entire list, breaking workspaces with
		# more than one source directory.
		for d in $abspkg; do
			( cd $d/pkg ; $MAKE -e pmodes ) >> $mail_msg_file
		done
	fi
fi
# If requested (-w), diff this build's proto area(s) against the ones
# saved from the previous build before they were overwritten.
if [[ "$w_FLAG" = "y" && "$build_ok" = "y" ]]; then
	[[ "$MULTI_PROTO" = no || "$D_FLAG" = y ]] &&
	    do_wsdiff DEBUG $ROOT.prev $ROOT
	[[ "$MULTI_PROTO" = yes && "$F_FLAG" = n ]] &&
	    do_wsdiff non-DEBUG $ROOT-nd.prev $ROOT-nd
fi
# Timestamp the end of the build and report total elapsed time, taken
# from the shell's $SECONDS counter.
END_DATE=`date`
echo "==== Nightly $maketype build completed: $END_DATE ====" | \
tee -a $LOGFILE >> $build_time_file
# -i10 forces decimal output; -Z2 zero-fills to two digits.
# NOTE(review): typeset -Z is ksh-specific (not bash); this script
# must run under ksh.
typeset -i10 hours
typeset -Z2 minutes
typeset -Z2 seconds
elapsed_time=$SECONDS
((hours = elapsed_time / 3600 ))
((minutes = elapsed_time / 60 % 60))
((seconds = elapsed_time % 60))
echo "\n==== Total build time ====" | \
tee -a $LOGFILE >> $build_time_file
echo "\nreal ${hours}:${minutes}:${seconds}" | \
tee -a $LOGFILE >> $build_time_file
if [ "$u_FLAG" = "y" -a "$f_FLAG" = "y" -a "$build_ok" = "y" ]; then
# Publish this machine's unreferenced-file list to the parent.
staffer cp ${SRC}/unref-${MACH}.out $PARENT_WS/usr/src/
#
# Produce a master list of unreferenced files -- ideally, we'd
# generate the master just once after all of the nightlies
# have finished, but there's no simple way to know when that
# will be. Instead, we assume that we're the last nightly to
# finish and merge all of the unref-${MACH}.out files in
# $PARENT_WS/usr/src/. If we are in fact the final ${MACH} to
# finish, then this file will be the authoritative master
# list. Otherwise, another ${MACH}'s nightly will eventually
# overwrite ours with its own master, but in the meantime our
# temporary "master" will be no worse than any older master
# which was already on the parent.
#
set -- $PARENT_WS/usr/src/unref-*.out
cp "$1" ${TMPDIR}/unref.merge
shift
# Intersect (comm -12: lines common to both sorted inputs) the
# remaining per-MACH lists: a file is only truly unreferenced if
# no architecture's build referenced it.
for unreffile; do
comm -12 ${TMPDIR}/unref.merge "$unreffile" > ${TMPDIR}/unref.$$
mv ${TMPDIR}/unref.$$ ${TMPDIR}/unref.merge
done
staffer cp ${TMPDIR}/unref.merge $PARENT_WS/usr/src/unrefmaster.out
fi
#
# All done save for the sweeping up.
# (whichever exit we hit here will trigger the "cleanup" trap which
# optionally sends mail on completion).
#
# Exit successfully iff the build completed cleanly.  Either path
# triggers the "cleanup" trap noted above, which optionally sends the
# completion mail.
case "$build_ok" in
y)	exit 0
	;;
*)	exit 1
	;;
esac