#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
#
import atexit
import calendar
import collections
import copy
import datetime
import errno
import hashlib
import os
import platform
import shutil
import six
import stat
import sys
import tempfile
import time
"""An Image object is a directory tree containing the laid-down contents
of a self-consistent graph of Packages.
An Image has a root path.
An Image of type IMG_ENTIRE does not have a parent Image. Other Image
types must have a parent Image. The external state of the parent Image
must be accessible from the Image's context, or duplicated within the
Image (IMG_PARTIAL for zones, for instance).
The parent of a user Image can be a partial Image. The parent of a
partial Image must be an entire Image.
An Image of type IMG_USER stores its external state at self.root +
".org.opensolaris,pkg".
An Image of type IMG_ENTIRE or IMG_PARTIAL stores its external state at
An Image needs to be able to have a different repository set than the
system's root Image.
For image format details, see section 5.3 of doc/on-disk-format.txt
in the pkg(7) gate.
"""
# Class constants
if should_exist:
assert(imgtype is None)
assert(not force)
else:
assert(imgtype is not None)
# Alternate package sources.
self.__alt_pkg_pub_map = None
self.__alt_pubs = None
self.__alt_known_cat = None
# Determine identity of client executable if appropriate.
if cmdpath == None:
# prevent brokenness in the test suite
raise RuntimeError("""
An Image object was allocated from within ipkg test suite and
cmdpath was not explicitly overridden. Please make sure to
explicitly set cmdpath when allocating an Image object, or
override cmdpath when allocating an Image object by setting PKG_CMDPATH
in the environment or by setting simulate_cmdpath in DebugValues.""")
# Indicates whether automatic image format upgrades of the
# on-disk format are allowed.
# Must happen after upgraded assignment.
self.img_prefix = None
# Can have multiple read cache dirs...
self.__read_cache_dirs = []
# ...but only one global write cache dir and incoming write dir.
self.__write_cache_dir = None
self.__user_cache_dir = None
self._incoming_cache_dir = None
# instead of a flat cache.
self.__write_cache_root = None
self.__lockfile = None
self.__sig_policy = None
self.__trust_anchors = None
self.__bad_trust_anchors = []
# cache for presence of boot-archive
self.__boot_archive = None
# When users and groups are added before their database files
# have been installed, the actions store them temporarily in the
# image, in these members.
self._usersbyname = {}
self._groupsbyname = {}
# Set of pkg stems being avoided per configuration.
self.__avoid_set = None
# Set of pkg stems being avoided by solver due to dependency
# constraints (not administrative action).
self.__implicit_avoid_set = None
# set of pkg stems subject to group
# dependency but removed because obsolete
self.__group_obsolete = None
# The action dictionary that's returned by __load_actdict.
self.__actdict_timestamp = None
# Transport operations for this image
if should_exist:
else:
# ignore .zfs snapdir if it's present
raise apx.CreatingImageInNonEmptyDir(
# set umask to proper value to prevent problems w/ overly
# locked down umask.
"""Returns a boolean value indicating whether the named catalog
has already been loaded. This is intended to be used as an
optimization function to determine which catalog to request."""
"""Initializes default catalog state. Actual data is provided
on demand via get_catalog()"""
# This is used to cache image catalogs.
self.__catalogs = {}
"""The absolute path of the image's metadata."""
"""A boolean value indicating whether the image is currently
locked."""
"""The absolute path of the image's location."""
"""The current signature policy for this image."""
if self.__sig_policy is not None:
return self.__sig_policy
"signature-required-names")
return self.__sig_policy
"""A dictionary mapping subject hashes for certificates this
image trusts to those certs. The image trusts the trust anchors
in its trust_anchor_dir and those in the image from which the
client was run."""
if self.__trust_anchors is not None:
return self.__trust_anchors
pkg_trust_anchors = {}
"anchors for the image were expected to be found "
"in {0}, but that is not a directory. Please set "
"the image property 'trust-anchor-directory' to "
self.__trust_anchors = {}
if loc_is_dir:
continue
try:
trusted_ca = \
raw, default_backend())
except (ValueError, IOError) as e:
else:
# We store certificates internally by
# the SHA-1 hash of its subject.
for s in pkg_trust_anchors:
if s not in self.__trust_anchors:
return self.__trust_anchors
"""A list of strings decribing errors encountered while parsing
trust anchors."""
return [_("{path} is expected to be a certificate but could "
"not be parsed. The error encountered "
for p, e in self.__bad_trust_anchors
]
"""The path to the filesystem that holds the write cache--used
to compute whether sufficient space is available for
downloads."""
return self.__user_cache_dir or \
"""Helper method for executing an image-modifying operation
that needs locking. It automatically handles calling
log_operation_start and log_operation_end by default. Locking
behaviour is controlled by the blocking_locks image property.
'allow_unprivileged' is an optional boolean value indicating
that permissions-related exceptions should be ignored when
attempting to obtain the lock as the related operation will
still work correctly even though the image cannot (presumably)
be modified.
'new_history_op' indicates whether we should handle history
operations.
"""
error = None
try:
if new_history_op:
yield
except apx.ImageLockedError as e:
# Don't unlock the image if the call failed to
# get the lock.
error = e
raise
except Exception as e:
error = e
raise
else:
finally:
if new_history_op:
"""Locks the image in preparation for an image-modifying
operation. Raises an ImageLockedError exception on failure.
Locking behaviour is controlled by the blocking_locks image
property.
'allow_unprivileged' is an optional boolean value indicating
that permissions-related exceptions should be ignored when
attempting to obtain the lock as the related operation will
still work correctly even though the image cannot (presumably)
be modified.
"""
# First, attempt to obtain a thread lock.
raise apx.ImageLockedError()
try:
# Attempt to obtain a file lock.
except EnvironmentError as e:
exc = None
return
e.filename)
else:
raise
if exc and not allow_unprivileged:
raise exc
except:
# If process lock fails, ensure thread lock is released.
raise
"""Unlocks the image."""
try:
if self.__lockfile:
finally:
"""Returns the type of image at directory: d; or None"""
rv = None
# First check for new image configuration file.
"pkg5.image")):
# Regardless of directory structure, assume
# this is an image for now.
return True
"cfg_cache")):
# For older formats, if configuration is
# missing, this can't be an image.
return False
# Configuration exists, but for older formats,
# all of these directories have to exist.
for n in ("state", "pkg"):
n)):
return False
return True
is_image(d, img_user_prefix):
is_image(d, img_root_prefix):
rv = IMG_ENTIRE
return rv
# Ascend from the given directory d to find first
# encountered image. If exact_match is true, if the
# image found doesn't match startd, raise an
# ImageNotFoundException.
startd = d
# eliminate problem if relative path such as "." is passed in
while True:
if exact_match and \
raise apx.ImageNotFoundException(
exact_match, startd, d)
return
# XXX follow symlinks or not?
oldpath = d
# Make sure we are making progress and aren't in an
# infinite loop.
#
# (XXX - Need to deal with symlinks here too)
if d == oldpath:
raise apx.ImageNotFoundException(
exact_match, startd, d)
"""Load this image's cached configuration from the default
location. This function should not be called anywhere other
than __set_dirs()."""
# configuration.
raise RuntimeError("self.root must be set")
version = None
# Configuration version is currently 3
# for all v3 images and newer.
version = 3
if self.__upgraded:
"""Normalizes publisher SSL configuration data, storing any
certificate files as needed in the image's SSL directory. This
logic is performed here in the image instead of ImageConfig as
it relies on special knowledge of the image structure."""
try:
# If SSL file doesn't exist (for
# whatever reason), then don't update
# configuration. (Let the failure
# happen later during an operation
# that requires the file.)
return
except EnvironmentError as e:
raise apx._convert_error(e)
# Ensure ssl_dir exists; makedirs handles any errors.
try:
# Destination name is based on digest of file.
# In order for this image to interoperate with
# older and newer clients, we must use sha-1
# here.
# Ensure file can be read by unprivileged users.
except EnvironmentError as e:
raise apx._convert_error(e)
return dest
# self.cfg.publishers is used because gen_publishers
# includes temporary publishers and this is only for
# configured ones.
if not repo:
continue
# Store and normalize ssl_cert and ssl_key.
if pval:
if not pval:
continue
# Store path as absolute to image root,
# it will be corrected on load to match
# actual image location if needed.
"""Should be called every time image configuration is loaded;
ensure ssl_cert and ssl_key properties of publisher repository
URI objects match current image location."""
# self.cfg.publishers is used because gen_publishers
# includes temporary publishers and this is only for
# configured ones.
if not repo:
continue
if not pval:
continue
continue
# If special image directory is part
# of path, then assume path should be
# rewritten to match current image
# location.
called after any image modification has completed. This
provides a public interface for programs that want to monitor
the image for modifications via event ports, etc."""
try:
misc.PKG_FILE_MODE)) as f:
except EnvironmentError as e:
raise apx._convert_error(e)
# First, create the image directories if they haven't been, so
# the configuration file can be written.
# Remove the old pkg.sysrepo(8) cache, if present.
try:
except EnvironmentError as e:
raise apx._convert_error(e)
if self.is_liveroot() and \
"svc:/application/pkg/system-repository:default") in \
"svc:/application/pkg/system-repository:default"])
# This ensures all old transport configuration is thrown away.
"""Create any missing parts of the image's directory structure.
'root' is an optional path to a directory to create the new
image structure in. If not provided, the current image
directory is the default.
'version' is an optional integer value indicating the version
of the structure to create. If not provided, the current image
version is the default.
"""
if not root:
if not version:
else:
"index", "lost+found", "pkg", "publisher",
try:
except EnvironmentError as e:
raise apx._convert_error(e)
# Ensure upgraded status is reset.
if startd == None:
raise RuntimeError(
"Live root image access is disabled but was \
else:
# Use a new Transport object every time location is changed.
# cleanup specified path
try:
except Exception as e:
# If current directory can't be obtained for any
# reason, ignore the error.
cwd = None
try:
except EnvironmentError as e:
raise apx._convert_error(e)
finally:
if cwd:
# If current image is locked, then it should be unlocked
# and then relocked after the imgdir is changed. This
# ensures that alternate BE scenarios work.
if relock:
# Must set imgdir first.
# Force a reset of version.
# Assume version 4+ configuration location.
# In the case of initial image creation, purge is specified
# to ensure that when an image is created over an existing
# one, any old data is removed first.
if entry == "ssl":
# Preserve certs and keys directory
# as a special exception.
continue
try:
else:
except EnvironmentError as e:
raise apx._convert_error(e)
elif not purge:
# Determine if the version 4 configuration file exists.
"cfg_cache")
# Load the image configuration.
if not purge:
try:
"version"))
# If version couldn't be read from
# configuration, then allow fallback
# path below to set things right.
# If version doesn't exist, attempt to determine version
# based on structure.
if purge:
# This is a new image.
else:
# Format is too old or invalid.
# Image is too new or too old.
# Ensure image version matches determined one; this must
# be set *after* the version checks above.
# Remaining dirs may now be set.
if relock:
# Setup cache directories.
self.__read_cache_dirs = []
self._incoming_cache_dir = None
self.__user_cache_dir = None
self.__write_cache_dir = None
self.__write_cache_root = None
# The user specified cache is used as an additional place to
# read cache data from, but as the only place to store new
# cache data.
# get_cachedirs() will build paths for each publisher's
# cache using this directory.
# If set, cache is a flat structure that is used for
# all publishers.
# Since the cache structure is flat, add it to the
# list of global read caches.
if self.__user_cache_dir:
if not self._incoming_cache_dir:
# Only a global incoming cache exists for newer images.
# Test if we have the permissions to create the cache
# incoming directory in this hierarchy. If not, we'll need to
# move it somewhere else.
try:
except EnvironmentError as e:
# There's no image cleanup hook, so we'll just
# remove this directory on process exit.
else:
# Forcibly discard image catalogs so they can be re-loaded
# from the new location if they are already loaded. This
# also prevents scribbling on image state information in
# the wrong location.
# Upgrade the image's format if needed.
# If we haven't loaded the system publisher configuration, do
# that now.
# Check to see if any system publishers have been changed.
# If so they need to be refreshed, so clear last_refreshed.
p.last_refreshed = None
# Check to see if any system publishers have been
# removed. If they have, remove their metadata and
# rebuild the catalogs.
try:
except apx.PermissionsException:
pass
if changed:
# we delay writing out any new system repository configuration
# until we've updated the on-disk catalog state. (otherwise we
# could lose track of syspub publishers changes and either
# return stale catalog information, or not do refreshes when
# we need to.)
if purge:
# Configuration shouldn't be written again unless this
# is an image creation operation (hence the purge).
# Let the linked image subsystem know that root is moving
# load image avoid pkg set
"""Transform the existing image structure and its data to
the newest format. Callers are responsible for locking.
'allow_unprivileged' is an optional boolean indicating
whether a fallback to an in-memory only upgrade should
be performed if a PermissionsException is encountered
during the operation.
'progtrack' is an optional ProgressTracker object.
"""
# Already upgraded.
# If pre-upgrade data still exists; fire off a
# process to dump it so execution can continue.
# Ensure all output is discarded; it really
# doesn't matter if this succeeds.
return False
"""Creates a new image with the given attributes if it does not
exist; should not be used with an existing image.
'is_zone' is a boolean indicating whether the image is a zone.
'pubs' is a list of Publisher objects to configure the image
with.
'refresh_allowed' is an optional boolean indicating that
network operations (such as publisher data retrieval) are
allowed.
'progtrack' is an optional ProgressTracker object.
'props' is an option dictionary mapping image property names to
values.
'variants' is an optional dictionary of variant names and
values.
'facets' is an optional dictionary of facet names and values.
"""
for p in pubs:
# Override any initial configuration information.
# Start the operation.
# Determine and add the default variants for the image.
if is_zone:
"nonglobal"
else:
"global"
# After setting up the default variants, add any overrides or
# additional variants or facets specified.
# Now everything is ready for publisher configuration.
# Since multiple publishers are allowed, they are all
# added at once without any publisher data retrieval.
# A single retrieval is then performed afterwards, if
# allowed, to minimize the amount of work the client
# needs to perform.
for p in pubs:
if refresh_allowed:
else:
# initialize empty catalogs on disk
# Ensure publisher search order is written.
def __allow_liveroot():
"""Check if we're allowed to access the current live root
image."""
# if we're simulating a live root then allow access to it
return True
# check if the user disabled access to the live root
return False
return False
# by default allow access to the live root
return True
"nonglobal"
"""Returns True if a boot_archive is present in this image"""
if self.__boot_archive is not None:
return self.__boot_archive
for p in ["platform/i86pc/amd64/boot_archive",
break
else:
return self.__boot_archive
"""return the filelist... add the filelist so we rebuild
boot archive if it changes... append trailing / to
directories that are really there"""
p = "boot/solaris/filelist.ramdisk"
return path + "/"
return path
return []
"""Returns a list of tuples of the form (dir, readonly, pub,
layout) where 'dir' is the absolute path of the cache directory,
'readonly' is a boolean indicating whether the cache can
be written to, 'pub' is the prefix of the publisher that
the cache directory should be used for, and 'layout' is a
FileManager object used to access file content in the cache.
If 'pub' is None, the cache directory is intended for all
publishers. If 'layout' is None, file content layout can
vary.
"""
file_layout = None
# Assume cache directories are in V1 Layout.
# Get all readonly cache directories.
cdirs = [
]
# Get global write cache directory.
# For images newer than version 3, file data can be stored
# in the publisher's file root.
# Cache is a tree structure like
return cdirs
"""Return the UTC time of the image's last state change or
None if unknown. By default the time is returned via datetime
object. If 'string' is true and a time is available, then the
time is returned as a string (instead of as a datetime
object)."""
# Always get last_modified time from known catalog. It's
# retrieved from the catalog itself since that is accurate
# down to the microsecond (as opposed to the filesystem which
# has an OS-specific resolution).
return rv
alt_pubs = {}
alt_src_pubs = dict(
(p.prefix, p)
for p in self.__alt_pubs
)
# Include alternate package source publishers
# in result, and temporarily enable any
# disabled publishers that already exist in
# the image configuration.
try:
# No override needed.
continue
# Discard origins and mirrors to prevent
# their accidental use.
except KeyError:
publishers = [
]
if p not in publishers
))
for pub in publishers:
# Prepare publishers for transport usage; this must be
# done each time so that information reflects current
# image state. This is done whether or not the
# publisher is returned so that in-memory state is
# always current.
yield pub
"""Return dictionary of configured + enabled publishers and
unconfigured publishers which still have packages installed.
Each entry contains a tuple of search order index starting at
0, and a boolean indicating whether or not this publisher is
"sticky", and a boolean indicating whether or not the
publisher is enabled"""
])
# Add any publishers for pkgs that are installed,
# but have been deleted. These publishers are implicitly
# not-sticky and disabled.
return ret
"""Return the highest ranked publisher."""
"publisher-search-order")
if pubs:
for p in self.gen_publishers():
return p
for p in self.get_installed_pubs():
return None
"""Validate the certificates of the specified publishers.
Raise an exception if any of the certificates has expired or
is close to expiring."""
if not pubs:
errors = []
for p in pubs:
r = p.repository
try:
except apx.ExpiredCertificate as e:
try:
publisher=p,
except EnvironmentError as e:
raise apx._convert_error(e)
if errors:
"""Returns a boolean value indicating whether a publisher
exists in the image configuration that matches the given
prefix or alias."""
return True
return False
"""Removes the publisher with the matching identity from the
image."""
if not progtrack:
"""Return a dictionary of configured publishers. This doesn't
include unconfigured publishers which still have packages
installed."""
return dict(
(p.prefix, p)
)
"""Return a list of configured publishers sorted by rank.
This doesn't include unconfigured publishers which still have
packages installed."""
"publisher-search-order")
#
# If someone has been editing the config file we may have
# unranked publishers. Also, as publishers come and go via the
# sysrepo we can end up with configured but unranked
# publishers. In either case just sort unranked publishers
# alphabetically.
#
ret = [
d[n]
for n in names
if n in d
] + [
d[n]
]
return ret
return pub
return pub
return pub
raise apx.UnknownPublisher(None)
"""Moves publisher "being_moved" to before "staying_put"
in search order.
The caller is responsible for locking the image."""
"""Moves publisher "being_moved" to after "staying_put"
in search order.
The caller is responsible for locking the image."""
# No alternate sources to merge.
return
# Temporarily merge the package metadata in the alternate
# known package catalog for packages not listed in the
# image's known catalog.
# Not interesting; already installed.
return False, None
if not img_entry is None:
# Already in image known catalog.
return False, None
"""Private helper function to cleanup package certificate
information after use of temporary package data."""
if not self.__alt_pubs:
return
# Cleanup publisher cert information; any certs not
# retrieved during temporary publisher use need to be expunged
# from the image configuration.
try:
except KeyError:
# Nothing to do.
continue
"""Specifies an alternate source of package metadata to be
temporarily merged with image state so that it can be used
as part of packaging operations."""
if not alt_sources:
self.__alt_pkg_pub_map = None
self.__alt_pubs = None
self.__alt_known_cat = None
return
elif self.__alt_pkg_sources_loaded:
# Ensure existing alternate package source data
# is not part of temporary image state.
pub=None):
"""Sets the preferred publisher for packaging operations.
'prefix' is an optional string value specifying the name of
a publisher; ignored if 'pub' is provided.
'alias' is an optional string value specifying the alias of
a publisher; ignored if 'pub' is provided.
'pub' is an optional Publisher object identifying the
publisher to set as the preferred publisher.
One of the above parameters must be provided.
The caller is responsible for locking the image."""
if not pub:
"is a system publisher and cannot be "
relative = None
for p in pubs:
# If we've gotten to the publisher we want to make
# highest ranked, then there's nothing to do because
# it's already as high as it can be.
if p == pub:
return
relative = p
break
assert relative, "Expected {0} to already be part of the " + \
try:
return True
except cfg.ConfigError:
return False
"""Destroys the image; image object should not be used
afterwards."""
return
# Paranoia.
return
try:
except EnvironmentError as e:
raise apx._convert_error(e)
"""Adds the provided publisher object to the image
configuration.
'refresh_allowed' is an optional, boolean value indicating
whether the publisher's metadata should be retrieved when adding
it to the image's configuration.
'progtrack' is an optional ProgressTracker object."""
# Ensure that if the publisher's meta directory already
# exists for some reason that the data within is not
# used.
try:
# First, verify that the publisher has a
# valid pkg(7) repository.
except Exception as e:
# Remove the newly added publisher since
# it is invalid or the retrieval failed.
raise
except:
# Remove the newly added publisher since
# the retrieval failed.
raise
"""Private version of add_publisher(); caller is responsible
for locking."""
assert (not search_after and not search_before) or \
(not search_after and not search_first) or \
(not search_before and not search_first)
if not progtrack:
# Must assign this first before performing operations.
# Before continuing, validate SSL information.
try:
except apx.ExpiringCertificate as e:
for ca in approved_cas:
try:
except EnvironmentError as e:
raise apx.MissingFileArgumentException(
ca)
raise apx._convert_error(e)
for hsh in revoked_cas:
if search_first:
elif search_before:
elif search_after:
# Only after success should the configuration be saved.
overlaypaths=None, **kwargs):
"""Generator that returns a tuple of the form (action, errors,
warnings, info) if there are any error, warning, or other
messages about an action contained within the specified
package. Where the returned messages are lists of strings
indicating fatal problems, potential issues (that can be
ignored), or extra information to be displayed respectively.
'fmri' is the fmri of the package to verify.
'progresstracker' is a ProgressTracker object.
'verifypaths' is the set of paths to verify.
'overlaypaths' is the set of overlaying paths to verify.
'kwargs' is a dict of additional keyword arguments to be passed
to each action verification routine."""
try:
except apx.UnknownPublisher:
# Since user removed publisher, assume this is the same
# as if they had set signature-policy ignore for the
# publisher.
sig_pol = None
else:
if not path_only:
# Only perform signature verification logic if there are
# signatures or if signature-policy is not 'ignore'.
try:
# Signature verification must be done using all
# the actions from the manifest, not just the
# ones for this image's variants.
"check-certificate-revocation"))
except apx.SigningException as e:
yield e.sig, [e], [], []
except apx.InvalidResourceLocation as e:
yield None, [e], [], []
"""Helper function to determine if the mediation
delivered by a link is allowed. If it is, then
the link should be verified. (Yes, this does mean
that the non-existence of links is not verified.)
"""
# Link isn't mediated or mediation is unknown.
return True
"version")
"implementation")
if med_version:
return med_version == cfg_med_version and \
# pkg verify only looks at actions that have not been dehydrated.
if dehydrate:
not mediation_allowed(act):
# Link doesn't match configured
# mediation, so shouldn't be verified.
continue
errors = []
warnings = []
info = []
if not path_only:
if path in verifypaths:
# It's safe to immediately discard this
# match as only one action can deliver a
# path with overlay=allow and only one with
# overlay=true.
# Verify that file that is faceted out does not
# exist. Exclude actions which may be delivered
# from multiple packages.
_("File should not exist"))
else:
# Action that is not applicable to image variant
# or has been dehydrated.
continue
"""update variants in image config"""
if new_variants is not None:
if new_facets is not None:
if new_mediators is not None:
"""Verify a manifest. The caller must supply the FMRI
for the package in 'fmri', as well as the path to the
manifest file that will be verified."""
try:
except InvalidContentException:
return False
"""Check to see if the manifest for pfmri is present on disk and
has the correct hash."""
if not on_disk or \
return on_disk
return False
"""Return path to package license directory."""
# Version 4+ images store license files per-stem, instead of
# per-stem and version, so that transitions between package
# versions don't require redelivery of license files.
"""Returns the publisher for the FMRI of an installed package
or None if the package is not installed.
"""
for f in self.gen_installed_pkgs():
return f.publisher
return None
"""Return path to on-disk manifest cache directory."""
# Needed for consumers such as search that don't provide
# publisher information.
"""Return path to on-disk manifest file."""
# Needed for consumers such as search that don't provide
# publisher information.
alt_pub=None):
"""Find on-disk manifest and create in-memory Manifest
object.... grab from server if needed"""
try:
raise KeyError
# if we have a intent string, let depot
# know for what we're using the cached manifest
if intent:
alt_repo = None
if alt_pub:
try:
except (apx.UnknownPublisher,
# It's not fatal if we can't find
# or reach the publisher.
pass
except KeyError:
return ret
alt_pub=None):
"""return manifest; uses cached version if available.
ignore_excludes controls whether manifest contains actions
for all variants
If 'ignore_excludes' is set to True, then all actions in the
manifest are included, regardless of variant or facet tags. If
set to False, then the variants and facets currently set in the
image will be applied, potentially filtering out some of the
actions."""
# Normally elide other arch variants, facets
if ignore_excludes:
else:
try:
except apx.ActionExecutionError as e:
raise
raise apx.InvalidPackageErrors([e])
return m
"""Sets the recorded installed state of each package pair in
'pkg_pairs'. 'pkg_pair' should be an iterable of tuples of
the format (added, removed) where 'removed' is the FMRI of the
package that was uninstalled, and 'added' is the package
installed for the operation. These pairs are representative of
the destination and origin package for each part of the
operation."""
updated = {}
continue
if add_pkg:
if rem_pkg:
"metadata", {}))
# 'Updating package state database'
"last-install")
if last_install:
mdata["last-install"] = \
mdata["last-update"] = \
else:
mdata["last-install"] = \
else:
# This entry is no longer available and has no
# meaningful state information, so should be
# discarded.
continue
# Catalog format only supports lists.
# Now record the package state.
# If the package is being marked as installed,
# then it shouldn't already exist in the
# installed catalog and should be added.
# Discard entries for alternate source packages that weren't
# installed as part of the operation.
# Nothing to do.
continue
if not entry:
# The only reason that the entry should
# not exist in the 'known' part is
# because it was removed during the
# operation.
continue
# Now add the publishers of packages that were installed
# from temporary sources that did not previously exist
# to the image's configuration. (But without any
# origins, sticky, and enabled.)
# List of publishers that need to be added is the
# intersection of installed and alternate minus
# the already configured.
# Sort the set to get a deterministic output.
# Ensure image configuration reflects new information.
# Remove manifests of packages that were removed from the
# system. Some packages may have only had facets or
# variants changed, so don't remove those.
# 'Updating package cache'
# Remove package cache directory if possible; we don't
# care if it fails.
try:
except:
pass
try:
except EnvironmentError as e:
raise apx._convert_error(e)
# Remove package manifest directory if possible; we
# don't care if it fails.
try:
except:
pass
# Temporarily redirect the catalogs to a different location,
# so that if the save is interrupted, the image won't be left
# with invalid state, and then save them.
try:
# Must copy the old catalog data to the new
# destination as only changed files will be
# written.
# copy any other state files from current state
# dir into new state dir.
# Next, preserve the old installed state dir, rename the
# new one into place, and then remove the old one.
except EnvironmentError as e:
# shutil.Error can contains a tuple of lists of errors.
# Some of the error entries may be a tuple others will
# be a string due to poor error handling in shutil.
msg = ""
entry[-1])
else:
raise apx._convert_error(e)
finally:
# Regardless of success, the following must happen.
"""Returns the requested image catalog.
'name' must be one of the following image constants:
IMG_CATALOG_KNOWN
The known catalog contains all of packages that are
installed or available from a publisher's repository.
IMG_CATALOG_INSTALLED
The installed catalog is a subset of the 'known'
catalog that only contains installed packages."""
raise RuntimeError("self.imgdir must be set")
if not cat:
# Apply alternate package source data every time that
# the known catalog is requested.
return cat
"""Private method to retrieve catalog; this bypasses the
normal automatic caching (unless the image hasn't been
upgraded yet)."""
try:
except EnvironmentError as e:
# Allow operations to work for
# unprivileged users.
croot = None
raise
# batch_mode is set to True here as any operations that modify
# the catalogs (add or remove entries) are only done during an
# image upgrade or metadata refresh. In both cases, the catalog
# is resorted and finalized so this is always safe to use.
return cat
"""Removes all image catalogs and their directories."""
"""Returns an fmri of the installed package matching the
package stem of the given fmri or None if no match is found."""
return fmris[0]
return None
"""Returns the repository object containing the origins that
should be used to retrieve the specified package or None if
it can be retrieved from all sources or is not a known package.
"""
if entry is None:
# Package not known.
return
try:
except KeyError:
# Can be retrieved from any source.
return
else:
if not slist:
# Can be retrieved from any source.
return
try:
except apx.UnknownPublisher:
# The source where the package was last found was
# recorded, but the publisher is no longer configured;
# return so that caller can fallback to default
# behaviour.
return
norigins = [
]
if not norigins:
# Known sources don't match configured; return so that
# caller can fallback to default behaviour.
return
return repo
"""Returns the list of states a package is in for this image."""
if entry is None:
return []
"""Returns a boolean value indicating whether the specified
package is installed."""
# Avoid loading the installed catalog if the known catalog
# is already loaded. This is safe since the installed
# catalog is a subset of the known, and a specific entry
# is being retrieved.
else:
if entry is None:
return False
"""Generate a list of callables that each return True if an
action is to be included in the image using the currently
defined variants & facets for the image, or an updated set if
new_variants or new_facets are specified."""
if new_variants:
else:
if new_facets is not None:
else:
""" return a copy of the current image variants"""
""" Return a copy of the current image facets"""
"""Return the path to a flag file indicating that the image
catalog is being updated."""
"""Called when we start updating the image catalog. Normally
returns False, but will return True if a previous update was
interrupted."""
# get the path to the image catalog update flag file
# if the flag file exists a previous update was interrupted so
# return True
return True
# create the flag file and return False
try:
except EnvironmentError as e:
raise apx.ReadOnlyFileSystemException(
e.filename)
raise
return False
"""Called when we're done updating the image catalog."""
# get the path to the image catalog update flag file
# delete the flag file.
try:
except EnvironmentError as e:
raise apx.ReadOnlyFileSystemException(
e.filename)
raise
"""Rebuilds the image catalogs based on the available publisher
catalogs."""
if not progtrack:
# Mark all operations as occurring at this time.
# The image catalogs need to be updated, but this is a bit
# tricky as previously known packages must remain known even
# if PKG_STATE_KNOWN is no longer true if any other state
# information is present. This is to allow freezing, etc. of
# package states on a permanent basis even if the package is
# no longer available from a publisher repository. However,
# this is only True of installed packages.
# batch_mode is set to True here since without it, catalog
# population time is almost doubled (since the catalog is
# re-sorted and stats are generated for every operation).
# In addition, the new catalog is first created in a new
# temporary directory so that it can be moved into place
# at the very end of this process (to minimize the chance
# that failure or interruption will cause the image to be
# left in an inconsistent state).
# Copy any regular files placed in the state directory
if p == self.__STATE_UPDATING_FILE:
# don't copy the state updating file
continue
# XXX if any of the below fails for any reason, the old 'known'
# catalog needs to be re-loaded so the client is in a consistent
# state.
# All enabled publisher catalogs must be processed.
# XXX For backwards compatibility, 'upgradability' of packages
# is calculated and stored based on whether a given pkg stem
# matches the newest version in the catalog. This is quite
# expensive (due to overhead), but at least the cost is
# consolidated here. This comparison is also cross-publisher,
# as it used to be. In the future, it could likely be improved
# by usage of the SAT solver.
newest = {}
None))
# Next, copy all of the entries for the catalog parts that
# currently exist into the image 'known' catalog.
# Iterator for source parts.
sparts = (
)
# Build list of installed packages based on actual state
# information just in case there is a state issue from an
# older client.
inst_stems = {}
continue
# Create the new installed catalog in a temporary location.
frozen_pkgs = dict([
])
# 'spart' is the source part.
if spart is None:
# Client hasn't retrieved this part.
continue
# New known part.
# Avoid accessor overhead since these will be
# used for every entry.
if pub in inst_stems and \
# copy() is too slow here and catalog entries
# are shallow so this should be sufficient.
if not base:
# Nothing else to do except add the
# entry for non-base catalog parts.
if installed:
continue
# Only the base catalog part stores package
# Assume V1 catalog source.
if installed:
# Check if the package is frozen.
if stem in frozen_pkgs:
# Determine if package is obsolete or has been
# renamed and mark with appropriate state.
dpent = None
if dp is not None:
if dpent is not None:
for a in dpent["actions"]:
# Constructing action objects
# for every action would be a
# lot slower, so a simple string
# match is done first so that
# only interesting actions get
# constructed.
if not a.startswith("set"):
continue
if not ("pkg.obsolete" in a or \
"pkg.renamed" in a):
continue
try:
# If the action can't be
# parsed or is not yet
# supported, continue.
continue
continue
if not act.include_this(
continue
# Add base entries.
if installed:
# Now add installed packages to list of known packages using
# previous state information. While doing so, track any
# new entries as the versions for the stem of the entry will
# need to be passed to finalize() for sorting.
final_fmris = []
# Old installed part.
# New known part.
# New installed part.
mdata = None
if pub not in inst_stems or \
# Entry is no longer valid or is already
# known.
continue
if base:
None))
if not nver or \
elif snver is not None:
# Check if the package is frozen.
if stem in frozen_pkgs:
else:
# Add entries.
# Save the new catalogs.
# Next, preserve the old installed state dir, rename the
# new one into place, and then remove the old one.
# Ensure in-memory catalogs get reloaded.
"""Refreshes the metadata (e.g. catalog) for one or more
publishers. Callers are responsible for locking the image.
'full_refresh' is an optional boolean value indicating whether
a full retrieval of publisher metadata (e.g. catalogs) or only
an update to the existing metadata should be performed. When
True, 'immediate' is also set to True.
'immediate' is an optional boolean value indicating whether
a refresh should occur now. If False, a publisher's selected
repository will only be checked for updates if the update
interval period recorded in the image configuration has been
exceeded.
'pubs' is a list of publisher prefixes or publisher objects
to refresh. Passing an empty list or using the default value
implies all publishers.
'ignore_unreachable' is an optional boolean value indicating
whether unreachable repositories should be ignored. If True,
errors contacting this repository are stored in the transport
but no exception is raised, allowing an operation to continue
if an unneeded repository is not online."""
if not progtrack:
pubs_to_refresh = []
if not pubs:
# Omit disabled publishers.
if not pubs:
return
p = pub
if p.disabled:
e = apx.DisabledPublisher(p)
raise e
if not pubs_to_refresh:
return
# Verify validity of certificates before attempting network
# operations.
try:
except apx.ExpiringCertificate as e:
try:
# Ensure Image directory structure is valid.
except Exception as e:
raise
failed = []
total = 0
for pub in pubs_to_refresh:
total += 1
try:
if changed:
if not ignore_unreachable and e:
continue
except apx.PermissionsException as e:
# No point in continuing since no data can
# be written.
break
except apx.ApiException as e:
continue
finally:
if updated:
else:
if failed:
raise e
if not updated:
return
return IMG_PUB_DIR
"""Removes the metadata for the specified publisher object,
except data for installed packages.
'pub' is the object of the publisher to remove the data for.
'progtrack' is an optional ProgressTracker object.
'rebuild' is an optional boolean specifying whether image
catalogs should be rebuilt after removing the publisher's
metadata.
"""
# Build a list of paths that shouldn't be removed because they
# belong to installed packages.
excluded = [
for f in self.gen_installed_pkgs()
]
if not excluded:
else:
try:
# Discard all publisher metadata except
# package manifests as a first pass.
if entry == "pkg":
continue
else:
# Build the list of directories that can't be
# removed.
# Now try to discard only package manifests
# that aren't for installed packages.
# This removes all manifest data
# for a given package stem.
continue
# Remove only manifest data for packages
# that are not installed.
# Finally, dump any cache data for this
# publisher if possible.
except EnvironmentError as e:
raise apx._convert_error(e)
if rebuild:
"""A generator function that produces FMRI strings as it
iterates over the list of installed packages. This is
faster than gen_installed_pkgs when only the FMRI string
is needed."""
if anarchy:
# Catalog entries always have publisher prefix.
continue
yield f
"""Return an iteration through the installed packages."""
yield f
"""Return the number of installed packages."""
return sum(
)
"""Return an iteration through all the tracked pkg stems
in the set of currently installed packages. Return value
is group pkg fmri, stem"""
for a in cat.get_entry_actions(f,
"""Create an on-disk database mapping action name and key
attribute value to the action string comprising the unique
attributes of the action, for all installed actions. This is
done with a file mapping the tuple to an offset into a second
file, where those actions are kept. Once the offsets are loaded
into memory, it is simple to seek into the second file to the
given offset and read until you hit an action that doesn't
match."""
if not progtrack:
self.__actdict_timestamp = None
"actions.stripped")
"actions.offsets")
"keys.conflicting")
heap = []
# nsd is the "name-space dictionary." It maps action name
# spaces (see action.generic for more information) to
# dictionaries which map keys to pairs which contain an action
# with that key and the pfmri of the package which delivered the
# action.
nsd = {}
if not act.globally_identical:
continue
# If we can't write the temporary files, then there's no point
# in producing actdict because it depends on a synchronized
# stripped actions file.
try:
actdict = {}
# We need to make sure the files are coordinated.
# The conflicting keys file doesn't need a timestamp
# because it's not coordinated with the stripped or
# offsets files and the result of loading it isn't
# reused by this class.
cnt = 0
while heap:
# This is a tight loop, so try to avoid burning
# CPU calling into the progress tracker
# excessively.
if last_name is None:
assert last_key is None
cnt += 1
else:
assert cnt > 0
cnt = 1
else:
cnt += 1
if last_name is not None:
assert last_key is not None
assert last_offset is not None
assert cnt > 0
except BaseException as e:
try:
except:
pass
raise
# Finally, rename the temporary files into their final place.
# If we have any problems, do our best to remove them, and we'll
# try to recreate them on the read-side.
try:
except EnvironmentError as e:
else:
try:
except:
pass
"""Remove on-disk database created by _create_fast_lookups.
Should be called before updating image state to prevent the
client from seeing stale state if _create_fast_lookups is
interrupted."""
"keys.conflicting"):
try:
except EnvironmentError as e:
continue
raise apx._convert_error(e)
"""Read the file of offsets created in _create_fast_lookups()
and return the dictionary mapping action name and key value to
offset."""
try:
"actions.offsets"), "r")
except IOError as e:
raise
assert actdict is not None
return actdict
# Make sure the files are paired, and try to create them if not.
# The original action.offsets file existed and had the same
# timestamp as the stored actdict, so that actdict can be
# reused.
# If we recognize neither file's version or their timestamps
# don't match, then we blow them away and try again.
stimestamp != otimestamp:
assert actdict is not None
return actdict
# At this point, the original actions.offsets file existed, no
# actdict was saved in the image, the versions matched what was
# expected, and the timestamps of the actions.offsets and
# actions.stripped files matched, so the actions.offsets file is
# parsed to generate actdict.
actdict = {}
# This is a tight loop, so try to avoid burning
# CPU calling into the progress tracker excessively.
# Since we are already using the offset, we use that
# to damp calls back into the progress tracker.
return actdict
"""Open the actions file described in _create_fast_lookups() and
return the corresponding file object."""
"actions.stripped"), "r")
if internal:
return sversion, stimestamp
return sf
"""Load the list of keys which have conflicting actions in the
existing image. If no such list exists, then return None."""
try:
if version != "VERSION 1":
return None
except EnvironmentError as e:
return None
raise
"""Iterates through the installed actions of type 'atype'. If
'implicit_dirs' is True and 'atype' is 'dir', then include
directories only implicitly defined by other filesystem
actions."""
if implicit_dirs:
if implicit_dirs:
for d in m.get_directories(excludes):
if d not in dirs:
"""Returns a set containing the prefixes of all publishers with
installed packages."""
return cat.publishers()
if uid is not None:
return uid
# XXX What to do about IMG_PARTIAL?
try:
except KeyError:
if returnuid:
return uid
else:
raise
if gid is not None:
return gid
try:
except KeyError:
if returngid:
return gid
else:
raise
"""Since the index directory will not reliably be updated when
the image root is, this should be called prior to using the
index directory.
"""
"""Clean up any downloads that were in progress but that
did not successfully finish."""
"""Delete the directory that stores all of our cached
downloaded content. This may take a while for a large
directory hierarchy. Don't clean up caches if the
user overrode the underlying setting using PKG_CACHEDIR or
PKG_CACHEROOT. """
return
cdirs = []
continue
if not cdirs:
return
if not progtrack:
# 'Updating package cache'
"""Called when unexpected file or directory is found during
package operations; returns the path of the salvage
directory where the item was stored. Can be called with
either an image-relative or absolute path to the file/directory
to be salvaged. If full_path is False (the default), remove
the current mountpoint of the image from the returned
directory path"""
# This ensures that if the path is already rooted in the image,
# that it will be stored in lost+found (due to os.path.join
# behaviour with absolute path components).
# If for some reason the path wasn't rooted in the
# image, but it is an absolute one, then strip the
# absolute part so that it will be stored in lost+found
# (due to os.path.join behaviour with absolute path
# components).
# remove current mountpoint from sdir
if not full_path:
return sdir
"""Called when recovering directory contents to implement
"salvage-from" directive... full_dest_path must exist.
dest_path is the image-relative location where we salvage to,
old_path is original image-relative directory that delivered
the files we're now recovering.
When recovering directories where the salvage-from string is
a substring of the previously packaged directory, attempt
to restore as much of the old directory structure as possible
by comparing the salvage-from value with the previously
packaged directory.
but have stopped delivering that dir, replacing it with a new
The intent of the package author was to have the
directory created as part of the salvaging operation, giving
and not to just end up with
"""
# this is here so that when salvaging the contents
# of a previously packaged directory, we attempt to
# restore as much of the old directory structure as
# possible.
try:
except OSError as e:
raise e
"""Create a temp directory under the image directory for various
purposes. If the process is unable to create a directory in the
image's temporary directory, a replacement location is found."""
try:
except (apx.PermissionsException,
return self.temporary_dir()
try:
# Force standard mode.
return rval
except EnvironmentError as e:
return self.temporary_dir()
raise apx._convert_error(e)
"""Create a temporary file under the image directory for various
purposes. If 'close' is True, close the file descriptor;
otherwise leave it open. If the process is unable to create a
file in the image's temporary directory, a replacement is
found."""
try:
except (apx.PermissionsException,
try:
if close:
except EnvironmentError as e:
raise apx._convert_error(e)
if close:
return name
else:
"""Attempts to eliminate redundant matches found during
packaging operations:
* First, stems of installed packages for publishers that
are now unknown (no longer present in the image
configuration) are dropped.
* Second, if multiple matches are still present, stems of
of installed packages, that are not presently in the
corresponding publisher's catalog, are dropped.
* Finally, if multiple matches are still present, all
stems except for those in state PKG_STATE_INSTALLED are
dropped.
Returns a list of the filtered matches, along with a dict of
their unique names."""
olist = []
# First eliminate any duplicate matches that are for unknown
# publishers (publishers which have been removed from the image
# configuration).
# Next, if there are still multiple matches, eliminate matches
# belonging to publishers that no longer have the FMRI in their
# catalog.
mlist = []
if not st["in_catalog"]:
continue
# Finally, if there are still multiple matches, and a known
# stem is installed, then eliminate any stems that do not
# have an installed version.
mlist = []
"""Avoid the specified packages... use pattern matching on
names; ignore versions."""
"""Unavoid the specified packages... use pattern matching on
names; ignore versions."""
if not_avoided:
# Don't allow unavoid if removal of the package from the
# avoid list would require the package to be installed
# as this would invalidate current image state. If the
# package is already installed though, it doesn't really
# matter if it's a target of an avoid or not.
installed_set = set([
f.pkg_name
for f in self.gen_installed_pkgs()
])
would_install = [
a
for f, a in self.gen_tracked_stems()
if a in unavoid_set and a not in installed_set
]
if would_install:
""" return dict of lists (avoided stem, pkgs w/ group
dependencies on this pkg)"""
return ret
comment):
"""Freeze the specified packages... use pattern matching on
names.
The 'pat_list' parameter contains the list of patterns of
packages to freeze.
The 'progtrack' parameter contains the progress tracker for this
operation.
The 'check_cancel' parameter contains a function to call to
check if the operation has been canceled.
The 'dry_run' parameter controls whether packages are actually
frozen.
The 'comment' parameter contains the comment, if any, which will
be associated with the packages that are frozen.
"""
p.publisher = None
return p
def __calc_frozen():
return dict([(s, __make_publisherless_fmri(p))
if dry_run:
# Get existing dictionary of frozen packages.
d = self.__freeze_dict_load()
# Update the dictionary with the new freezes and
# comment.
"""Unfreeze the specified packages... use pattern matching on
names; ignore versions.
The 'pat_list' parameter contains the list of patterns of
packages to freeze.
The 'progtrack' parameter contains the progress tracker for this
operation.
The 'check_cancel' parameter contains a function to call to
check if the operation has been canceled.
The 'dry_run' parameter controls whether packages are actually
frozen."""
def __calc_unfrozen():
# Get existing dictionary of frozen packages.
d = self.__freeze_dict_load()
# Match the user's patterns against the frozen packages
# and return the stems which matched, and the dictionary
# of the currently frozen packages.
if dry_run:
return __calc_unfrozen()[0]
unfrozen_set, d = __calc_unfrozen()
# Remove the specified packages from the frozen set.
for n in unfrozen_set:
d.pop(n, None)
return unfrozen_set
# A plan can be requested without actually performing an
# operation on the image.
try:
except apx.ConflictingActionErrors:
# Image plan evaluation can fail because of duplicate
# action discovery, but we still want to be able to
# display and log the solved FMRI changes.
"Unevaluated: merged plan had errors\n" + \
raise
"""Private helper function to perform base plan creation and
cleanup.
"""
# If pkg5.hang file is present in image dir, then
# sleep after loading configuration until file is
# gone. This is used by the test suite for signal
# handling testing, etc.
# Allow garbage collection of previous plan.
# Always start with most current (on-disk) state information.
try:
try:
if _ip_noop:
elif _op in [
elif _op in [
else:
raise RuntimeError(
except apx.ActionExecutionError as e:
raise
raise apx.InvalidPackageErrors([e])
except apx.ApiException:
raise
try:
except apx.ActionExecutionError as e:
raise
raise apx.InvalidPackageErrors([e])
finally:
"""Take a list of packages, specified in pkgs_inst, and attempt
to assemble an appropriate image plan. This is a helper
routine for some common operations in the client.
"""
variants=None):
assemble an image plan which changes them. This is a helper
routine for some common operations in the client."""
# compute dict of changing variants
if variants:
elif facets:
for f in facets:
if facets[f] is None:
new_facets.pop(f, None)
else:
new_facets[f] = facets[f]
"""Take a dictionary of mediators and attempt to assemble an
appropriate image plan to set or revert them based on the
provided version and implementation values. This is a helper
routine for some common operations in the client.
"""
# Compute dict of changing mediators.
for m in new_mediators.keys():
new_values = new_mediators[m]
if not new_values:
if m not in old_mediators:
# Nothing to revert.
del new_mediators[m]
continue
# Revert mediator to defaults.
new_mediators[m] = {}
continue
# Validate mediator, provided version, implementation,
# and source.
if not valid:
if med_version:
if valid:
new_mediators[m]["version"] = \
else:
invalid_mediations[m]["version"] = \
(med_version, error)
if med_impl:
if not valid:
invalid_mediations[m]["version"] = \
raise apx.PlanCreationException(
"""Attempt to create an appropriate image plan to bring an
image in sync with it's linked image constraints. This is a
helper routine for some common operations in the client."""
"""Create uninstall plan to remove the specified packages."""
"""Create a plan to update all packages or the specific ones as
far as possible. This is a helper routine for some common
operations in the client.
"""
"""Revert the specified files, or all files tagged as specified
in args to their manifest definitions.
"""
"""Remove non-editable files and hardlinks from an image."""
"""Reinstall non-editable files and hardlinks to a dehydrated
image."""
"""Create an image plan to fix the image. Note: verify shares
the same routine."""
"""Create an image plan that doesn't update the image in any
way."""
"""Test whether the packaging system is updated to the latest
version known to be available for this image."""
#
# This routine makes the distinction between the "target image",
# which will be altered, and the "running image", which is
# to say whatever image appears to contain the version of the
# pkg command we're running.
#
#
# There are two relevant cases here:
# 1) Packaging code and image we're updating are the same
# image. (i.e. 'pkg update')
#
# 2) Packaging code's image and the image we're updating are
# different (i.e. 'pkg update -R')
#
# In general, we care about getting the user to run the
# most recent packaging code available for their build. So,
# if we're not in the liveroot case, we create a new image
# which represents "/" on the system.
#
if not progtrack:
#
# Find the path to ourselves, and use that
# as a way to locate the image we're in. It's
# not perfect-- we could be in a developer's
# workspace, for example.
#
if refresh_allowed:
# If refreshing publisher metadata is allowed,
# then perform a refresh so that a new packaging
# system package can be discovered.
try:
except (apx.ImageFormatUpdateNeeded,
# Can't use the image to perform an
# update check and it would be wrong
# to prevent the operation from
# continuing in these cases.
cre.errmessage = \
_("pkg(7) update check failed.")
raise
finally:
if useimg:
if not pfmri or \
# If no version of the package system is installed or a
# newer version isn't available, then the client is
# "up-to-date".
return True
if inc_fmri:
# If the ips-incorporation is installed (it should be
# bypass the solver and plan evaluation if none of the
# newer versions are allowed by the incorporation.
inc_ver = None
break
if inc_ver:
break
else:
# No version is newer than installed and
# satisfied incorporation constraint.
return True
# XXX call to progress tracker that the package is being
# refreshed
# avoid set implementation uses simplejson to store a set of pkg_stems
# being avoided (explicitly or implicitly), and a set of tracked stems
# that are obsolete.
#
# format is (version, dict((pkg stem, "avoid", "implicit-avoid" or
# "obsolete"))
"""Return copy of avoid set"""
if implicit:
"""Return copy of tracked obsolete pkgs"""
"""Load avoid set from image state directory"""
try:
with open(state_file) as f:
except EnvironmentError as e:
raise apx._convert_error(e)
except ValueError as e:
" file {state_file} in {salvaged_path}"
return
for stem in d:
if d[stem] == "avoid":
elif d[stem] == "implicit-avoid":
elif d[stem] == "obsolete":
else:
else:
obsolete=None):
"""Store avoid set to image state directory"""
if new_set is not None:
if implicit_avoid is not None:
if obsolete is not None:
if not self.__avoid_set_altered:
return
d.update(
(a, "implicit-avoid")
for a in self.__implicit_avoid_set
)
try:
except Exception as e:
str(e)))
return
# frozen dict implementation uses simplejson to store a dictionary of
# pkg_stems that are frozen, the versions at which they're frozen, and
# the reason, if given, why the package was frozen.
#
# format is (version, dict((pkg stem, (fmri, comment, timestamp))))
"""Return a list of tuples containing the fmri that was frozen,
and the reason it was frozen."""
return [
]
"""Load the dictionary containing the current state of frozen
packages."""
try:
with open(state_file) as f:
except EnvironmentError as e:
raise apx._convert_error(e)
except ValueError as e:
raise apx.UnknownFreezeFileVersion(
return d
return {}
"""Save the dictionary of frozen packages."""
# Save the dictionary to disk.
try:
except EnvironmentError as e:
raise apx._convert_error(e)
"""A boolean function that will be added to the pkg(7) exclude
mechanism to determine if an action is allowed to be installed
based on whether its publisher is going to be dehydrated or has
been currently dehydrated."""
# A closure is used so that the list of dehydrated publishers
# can be accessed.
if publisher not in dehydrated_pubs:
# Allow actions from publishers that are not
# dehydrated.
return True
if aname == "file":
return True
return True
return False
elif aname == "hardlink":
return False
return True
return __allow_action_dehydrate