#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
#

import os
import errno
import time
import hashlib

from urllib.parse import quote, unquote

import pkg.fmri as fmri
import pkg.search_errors as search_errors
"""Opens all data holders in data_list and ensures that the
versions are consistent among all of them.
It retries several times in case a race condition between file
migration and open is encountered.
Note: Do not set timeout to be 0. It will cause an exception to be
immediately raised.
"""
missing = None
cur_version = None
# The assignments to cur_version and missing cannot be
# placed here. They must be reset prior to breaking out of the
# for loop so that the while loop condition will be true. They
# cannot be placed after the for loop since that path is taken
# when all files are missing or opened successfully.
for d in data_list:
# All indexes must have the same version and all must
# either be present or absent for a successful return.
# If one of these conditions is not met, the function
# tries again until it succeeds or the time spent in
# in the function is greater than timeout.
try:
# If we get here, then the current index file
# is present.
if missing == None:
elif missing:
missing = None
cur_version = None
break
d.set_file_handle(fh, f)
version_num = \
# Read the version. If this is the first file,
# set the expected version otherwise check that
# the version matches the expected version.
if cur_version == None:
elif not (cur_version == version_num):
# Got inconsistent versions, so close
# all files and try again.
for d in data_list:
missing = None
cur_version = None
break
except IOError as e:
# If the index file is missing, ensure
# that previous files were missing as
# well. If not, try again.
for d in data_list:
missing = None
cur_version = None
break
else:
for d in data_list:
raise
if missing:
assert cur_version == None
# The index is missing (ie, no files were present).
return None
else:
assert cur_version is not None
return cur_version
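
# Illustrative usage sketch (not part of the original module): given holder
# objects derived from IndexStoreBase below, a caller opens them together and
# gets back the common version number, or None when no index exists yet. The
# directory and file names here are hypothetical examples.
#
#     stores = [IndexStoreMainDict("main_dict.ascii.v2"),
#         IndexStoreSet("full_fmri_list")]
#     try:
#             version = consistent_open(stores, "/var/pkg/index", timeout=1.0)
#             if version is None:
#                     pass    # no index present yet
#     finally:
#             for s in stores:
#                     s.close_file_handle()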
"""Base class for all data storage used by the indexer and
queryEngine. All members must have a file name and maintain
an internal file handle to that file as instructed by external
calls.
"""
self._file_handle = None
self._file_path = None
if self._file_handle:
raise RuntimeError("setting an extant file handle, "
"must close first, fp is: " + f_path)
else:
return self._file_path
"""Closes the file handle and clears it so that it cannot
be reused.
"""
if self._file_handle:
self._file_handle = None
"""Writes the dictionary in the expected format.
Note: Only child classes should call this method.
"""
version_string = "VERSION: "
"""This method uses the modification time and the file size
to (heuristically) determine whether the file backing this
storage has changed since it was last read.
"""
return True
return not self._have_read
"""This uses consistent open to ensure that the version line
processing is done consistently and that only a single function
actually opens files stored using this class.
"""
"""Class for representing the main dictionary file
"""
        # Here is an example of a line from the main dictionary; it is
        # explained below:
        # %25gconf.xml file!basename@basename#579,13249,13692,77391,77628
        #
        # Each line begins with a urllib quoted search token. It's followed by
        # a set of space separated lists. Each of these lists begins with an
        # action type. It's separated from its sublist by a '!'. Next is the
        # key type, which is separated from its sublist by a '@'. Next is the
        # full value, which is used in set actions to hold the full value
        # which matched the token. It's separated from its sublist by a '#'.
        # The next token (579) is the fmri id. The subsequent comma separated
        # values are the byte offsets into that manifest of the lines
        # containing that token.

        sep_chars = [" ", "!", "@", "#", ","]

        def __init__(self, file_name):
                IndexStoreBase.__init__(self, file_name)
                self._old_suffix = None
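
        # Worked example (illustrative): the sample line above decomposes as
        #
        #     token:      "%25gconf.xml" (urllib-quoted "%gconf.xml")
        #     action:     "file"
        #     key type:   "basename"
        #     full value: "basename"
        #     fmri id:    579
        #     offsets:    13249, 13692, 77391, 77628 (bytes into the manifest
        #                 of package 579 where the token appears)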
"""This class relies on external methods to write the file.
Making this empty call to protected_write_dict_file allows the
file to be set up correctly with the version number stored
correctly.
"""
version_num, [])
"""Return the file handle. Note that doing
anything other than sequential reads or writes
to or from this file_handle may result in unexpected
behavior. In short, don't use seek.
"""
return self._file_handle
"""Parses one line of a main dictionary file.
Changes to this function must be paired with changes to
write_main_dict_line below.
This should produce the same data structure that
_write_main_dict_line in indexer.py creates to write out each
line.
"""
res = []
at_res = []
st_res = []
fv_res = []
offsets = [
]
(pfmri_index, offsets))
"""Pulls the token out of a line from a main dictionary file.
Changes to this function must be paired with changes to
write_main_dict_line below.
"""
"""Paired with parse_main_dict_line above. Transforms a token
and its data into the string which can be written to the main
dictionary.
The "token" parameter is the token whose index line is being
generated.
The "entries" parameter is a list of lists of lists and so on.
It contains information about where and how "token" was seen in
manifests. The depth of all lists at each level must be
consistent, and must match the length of "sep_chars" and
"quote". The details of the contents on entries are described
in _write_main_dict_line in indexer.py.
"""
sep_chars[4],
return res + "\n"
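
        # Round-trip sketch (illustrative): transform_main_dict_line is
        # intended as the inverse of parse_main_dict_line, so for a
        # well-formed line:
        #
        #     tok, entries = IndexStoreMainDict.parse_main_dict_line(line)
        #     assert IndexStoreMainDict.transform_main_dict_line(
        #         tok, entries) == line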
"""Returns the number of entries removed during a second phase
of indexing.
"""
# This returns 0 because this class is not responsible for
# storing anything in memory.
return 0
"""Moves the existing file with self._name in directory
use_dir to a new file named self._name + suffix in directory
use_dir. If it has done this previously, it removes the old
file it moved. It also opens the newly moved file and uses
that as the file for its file handle.
"""
assert self._file_handle is None
if self._old_suffix is not None:
"""Used when both a list and a dictionary are needed to
store the information. Used for bidirectional lookup when
one item is an int (an id) and the other is not (an entity). It
maintains a list of empty spots in the list so that adding entities
can take advantage of unused space. It encodes empty space as a blank
line in the file format and '' in the internal list.
"""
decode_function=lambda x: x):
self._list_of_empties = []
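
        # Sketch of the empty-slot encoding (illustrative): after the entity
        # with id 1 is removed, the state is
        #
        #     self._list            -> ["a", "", "c"]
        #     self._list_of_empties -> [1]
        #     file contents         -> "a\n\nc\n" (blank line for id 1)
        #
        # so a later add_entity can reuse id 1 instead of growing the list.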
"""Adds an entity consistently to the list and dictionary
allowing bidirectional lookup.
"""
else:
else:
if not(is_empty):
return use_id
"""deletes in_id from the list and the dictionary """
"""deletes the entity from the list and the dictionary """
"""returns the id of entity """
"""Adds entity if it's not previously stored and returns the
id for entity.
"""
# This code purposefully reimplements add_entity
# code. Replacing the function calls to has_entity, add_entity,
# and get_id with direct access to the data structure gave a
# speed up of a factor of 4. Because this is a very hot path,
# the tradeoff seemed appropriate.
if self._list_of_empties:
else:
else:
"""return the entity in_id maps to """
"""check if entity is in storage """
"""Check if the structure has any empty elements which
can be filled with data.
"""
"""returns the next id which maps to no element """
"""Passes self._list to the parent class to write to a file.
"""
"""Reads in a dictionary previously stored using the above
call
"""
assert self._file_handle
# A blank line means that id can be reused.
if line == "\n":
else:
"""Returns the number of entries removed during a second phase
of indexing.
"""
"""Class used when only entity -> id lookup is needed
"""
"""Reads in a dictionary stored in line number -> entity
format
"""
"""Returns the number of entries removed during a second phase
of indexing.
"""
"""Dictionary which allows dynamic update of its storage
"""
if " " in str:
else:
return "0" + str
"""Reads in a dictionary stored in with an entity
and its number on each line.
"""
else:
"""Opens the output file for this class and prepares it
to be written via write_entity.
"""
"""Writes the entity out to the file with my_id """
assert self._file_handle is not None
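
        # Usage sketch (illustrative, hypothetical file name and values):
        # this store appends entries incrementally instead of rewriting the
        # whole file:
        #
        #     d = IndexStoreDictMutable("token_byte_offset")
        #     d.open_out_file("/var/pkg/index", 2)
        #     d.write_entity("token", 12345)
        #     d.close_file_handle()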
""" Generates an iterable list of string representations of
the dictionary that the parent's protected_write_dict_file
function can call.
"""
version_num, [])
"""Returns the number of entries removed during a second phase
of indexing.
"""
return 0

class IndexStoreSetHash(IndexStoreBase):

        def __init__(self, file_name):
                IndexStoreBase.__init__(self, file_name)
                # In order to interoperate with older clients, we must use
                # sha-1 here.
                self.hash_val = hashlib.sha1().hexdigest()

        def set_hash(self, vals):
                """Set the hash value."""
                self.hash_val = self.calc_hash(vals)

        def calc_hash(self, vals):
                """Calculate the hash value of the sorted members of vals."""
                vl = list(vals)
                vl.sort()
                # In order to interoperate with older clients, we must use
                # sha-1 here.
                shasum = hashlib.sha1()
                for v in vl:
                        # Unicode-objects must be encoded before hashing.
                        shasum.update(v.encode("utf-8"))
                return shasum.hexdigest()
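
        # Illustrative property: because the members are sorted before
        # hashing, insertion order does not affect the result:
        #
        #     s = IndexStoreSetHash("hash_file")    # hypothetical file name
        #     assert s.calc_hash(["b", "a"]) == s.calc_hash(["a", "b"])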
"""Write self.hash_val out to a line in a file """
"""Process a dictionary file written using the above method
"""
res = 0
assert res < 1
return res
"""Check the hash value of vals against the value stored
in the file for this object."""
if not self._have_read:
"""Returns the number of entries removed during a second phase
of indexing."""
return 0
"""Used when only set membership is desired.
This is currently designed for exclusive use
with storage of fmri.PkgFmris. However, that impact
is only seen in the read_and_discard_matching_from_argument
method.
"""
"""Remove entity purposfully assumes that entity is
already in the set to be removed. This is useful for
error checking and debugging.
"""
"""Write each member of the set out to a line in a file """
"""Process a dictionary file written using the above method
"""
assert self._file_handle
res = 0
res = i + 1
return res
"""Reads the file and removes all frmis in the file
from fmri_set.
"""
if self._file_handle:
"""Returns the number of entries removed during a second phase
of indexing."""
"""Class used to store and process fmri to offset mappings. It does
delta compression and deduplication of shared offset sets when writing
to a file."""
"""file_name is the name of the file to write to or read from.
p_id_trans is an object which has a get entity method which,
when given a package id number returns the PkgFmri object
for that id number."""
self._fmri_offsets = {}
"""Adds a package id number and an associated offset to the
existing dictionary."""
try:
except KeyError:
"""Does delta encoding of offsets to reduce space by only
storing the difference between the current offset and the
previous offset. It also performs deduplication so that all
packages with the same set of offsets share a common bucket."""
inv = {}
old_o = 0
bucket = []
old_o = o
if h not in inv:
inv[h] = []
return inv
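
        # Worked example (illustrative): offsets {579: [100, 250, 400]}
        # delta-encode to the bucket [100, 150, 150], keyed as the string
        # "100 150 150" in the returned dictionary. Any other package with
        # exactly the same offset set is appended to the same bucket, so the
        # offset string is written to the file only once.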
"""For a given offset string, a list of package id numbers,
and a translator from package id numbers to PkgFmris, returns
the string which represents that information. Its format is
space separated package fmris, followed by a !, followed by
space separated offsets which have had delta compression
performed."""
return " ".join([
]) + "!" + offset_str
"""Write the mapping of package fmris to offset sets out
to the file."""
version_num, (
for o in inv
))
"""Read a file written by the above function and store the
information in a dictionary."""
assert self._file_handle
for l in self._file_handle:
"""For a list of strings of offsets, undo the delta compression
that has been performed."""
old_o = 0
ret = []
for o in offs:
old_o = o
return ret
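
        # Example (illustrative): de_delta(["100", "150", "150"]) returns
        # [100, 250, 400], reversing the encoding performed by
        # invert_id_to_offsets_dict above.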
"""For a given function which returns true if it matches the
desired fmri, return the offsets which are associated with the
fmris which match."""
offs = []
if match_func(p):
break