#!./perl
BEGIN {
# @INC poking no longer needed w/ new MakeMaker and Makefile.PL's
# with $ENV{PERL_CORE} set
# In case we need it in future...
}
use strict;
use warnings;
# These may get re-ordered.
# RAW is a do_now as inserted by &enter
# AGG is an aggregated do_now, as built up by &process
# Field indices for the two "do_now" array layouts described below:
# RAW_* index the small per-byte arrays built by &enter;
# AGG_* index the aggregated arrays built by &process.
use constant {
RAW_NEXT => 0,
RAW_IN_LEN => 1,
RAW_OUT_BYTES => 2,
RAW_FALLBACK => 3,
AGG_MIN_IN => 0,
AGG_MAX_IN => 1,
AGG_OUT_BYTES => 2,
AGG_NEXT => 3,
AGG_IN_LEN => 4,
AGG_OUT_LEN => 5,
AGG_FALLBACK => 6,
};
# (See the algorithm in encengine.c - we're building structures for it)
# There are two sorts of structures.
# "do_now" (an array, two variants of what needs storing) is whatever we need
# to do now we've read an input byte.
# It's housed in a "do_next" (which is how we got to it), and in turn points
# to a "do_next" which contains all the "do_now"s for the next input byte.
# There will be a "do_next" which is the start state.
# For a single byte encoding it's the only "do_next" - each "do_now" points
# back to it, and each "do_now" will cause bytes. There is no state.
# For a multi-byte encoding where all characters in the input are the same
# length, then there will be a tree of "do_now"->"do_next"->"do_now"
# branching out from the start state, one step for each input byte.
# The leaf "do_now"s will all be at the same distance from the start state,
# only the leaf "do_now"s cause output bytes, and they in turn point back to
# the start state.
# For an encoding where there are variable length input byte sequences, you
# will encounter a leaf "do_now" sooner for the shorter input sequences, but
# as before the leaves will point back to the start state.
# The system will cope with escape encodings (imagine them as a mostly
# self-contained tree for each escape state, and cross links between trees
# at the state-switching characters) but so far no input format defines these.
# The system will also cope with having output "leaves" in the middle of
# the bifurcating branches, not just at the extremities, but again no
# input format does this yet.
# There are two variants of the "do_now" structure. The first, smaller variant
# is generated by &enter as the input file is read. There is one structure
# for each input byte. Say we are mapping a single byte encoding to a
# single byte encoding, with "ABCD" going "abcd". There will be
# 4 "do_now"s, {"A" => [...,"a",...], "B" => [...,"b",...], "C"=>..., "D"=>...}
# &process then walks the tree, building aggregate "do_now" structures for
# adjacent bytes where possible. The aggregate is for a contiguous range of
# bytes which each produce the same length of output, each move to the
# same next state, and each have the same fallback flag.
# So our 4 RAW "do_now"s above become replaced by a single structure
# containing:
# ["A", "D", "abcd", 1, ...]
# ie, for an input byte $_ in "A".."D", output 1 byte, found as
# substr ("abcd", (ord $_ - ord "A") * 1, 1)
# which maps very nicely into pointer arithmetic in C for encengine.c
sub encode_U
{
    # UTF-8 encode long hand - only covers part of perl's range
    ## my $uv = shift;
    # chr() works in native space so convert value from table
    # into that space before using chr().
    # (The visible chunk had lost these code lines; without them the sub
    # returned an undeclared $ch, which cannot compile under strict.)
    my $ch = chr utf8::unicode_to_native(shift);
    # Now get core perl to encode that the way it likes.
    utf8::upgrade($ch);
    return $ch;
}
sub encode_S
{
    # Single-byte encoding: the output byte is just the code value;
    # the page argument is irrelevant here.
    ## my ($ch,$page) = @_; return chr($ch);
    my ($code) = @_;
    return chr $code;
}
sub encode_D
{
    # encode double byte MS byte first
    ## my ($ch,$page) = @_; return chr($page).chr($ch);
    # The sub previously fell off the end returning nothing, although it
    # is registered in %encode_types as the double-byte encoder; restore
    # the behaviour the commented prototype above documents.
    return chr($_[1]) . chr($_[0]);
}
sub encode_M
{
    # encode Multi-byte - single for 0..255 otherwise double
    ## my ($ch,$page) = @_;
    ## return &encode_D if $page;
    ## return &encode_S;
    # Previously this ignored the page and always emitted a single byte,
    # contradicting the comment above. Emit page byte first (same layout
    # as encode_D) when the page is non-zero.
    return chr($_[1]) . chr($_[0]) if $_[1];
    return chr $_[0];
}
# Dispatch table from the encoding type letter to the routine that
# encodes one character (see the encode_* subs above).
my %encode_types = (U => \&encode_U,
S => \&encode_S,
D => \&encode_D,
M => \&encode_M,
);
# Win32 does not expand globs on command line
my %opt;
# I think these are:
# -Q to disable the duplicate codepoint test
# -S make mapping errors fatal
# -q to remove comments written to output files
# -O to enable the (brute force) substring optimiser
# -o <output> to specify the output file name (else it's the first arg)
# -f <inlist> to give a file with a list of input files (else use the args)
# -n <name> to name the encoding (else use the basename of the input file).
# This really should go first, else the die here causes empty (non-erroneous)
# output files to be written.
my @encfiles;
# -F is followed by name of file containing list of filenames
} else {
}
{
$doC = 1;
foreach my $fh (\*C,\*D,\*H)
{
/*
$^X $0 @orig_ARGV
*/
}
{
print C "#define U8 U8\n";
}
}
{
$doEnc = 1;
}
{
$doUcm = 1;
}
{
$doPet = 1;
}
# Shared state for the string-table builder:
#  %strings        - set of distinct output byte strings (keys only, see &addstrings)
#  $string_acc     - the accumulated "big string" grown by &outbigstring
#  %strings_in_acc - offset of each string within $string_acc
#  %encoding, $saved, $subsave, $strings appear to hold per-encoding
#  info and byte-saving statistics -- TODO confirm against the full file.
my %encoding;
my %strings;
my $string_acc;
my %strings_in_acc;
my $saved = 0;
my $subsave = 0;
my $strings = 0;
sub cmp_name
{
    # Sort comparator (reads the usual $a/$b): when both names carry a
    # trailing "-<digits>" suffix, order by that number first; otherwise
    # (or on a tie) fall back to plain string comparison.
    my ($a_num) = $a =~ /^.*-(\d+)/;
    if (defined $a_num) {
        my ($b_num) = $b =~ /^.*-(\d+)/;
        if (defined $b_num) {
            my $by_number = $a_num <=> $b_num;
            return $by_number if $by_number;
        }
    }
    return $a cmp $b;
}
{
{
{
}
else
{
}
}
else
{
}
}
if ($doC)
{
{
}
{
# push(@{$encoding{$name}},outstring(\*C,$e2u->{Cname}.'_def',$erep));
}
{
# my ($e2u,$u2e,$rep,$min_el,$max_el,$rsym) = @{$encoding{$enc}};
#my @info = ($e2u->{Cname},$u2e->{Cname},$rsym,length($rep),$min_el,$max_el);
my $replen = 0;
$sym =~ s/\W+/_/g;
# This is to make null encoding work -- dankogai
$info[$i] ||= 1;
}
# end of null tweak -- dankogai
}
{
$sym =~ s/\W+/_/g;
}
{
my $mod = $1;
print C <<'END';
static void
{
dSP;
int i = 0;
{
}
}
print C "BOOT:\n{\n";
print C "}\n";
}
# Close in void context is bad, m'kay
$saved, $perc_saved if $saved;
}
elsif ($doEnc)
{
{
}
}
elsif ($doUcm)
{
{
}
}
# writing half meg files and then not checking to see if you just filled the
# disk is bad, m'kay
# End of the main program.
sub compile_ucm
{
# NOTE(review): large parts of this sub's body have been lost from this
# chunk of the file ($fh, @uni, $fb and several braces have no visible
# origin); do not infer full behaviour from what remains. What survives:
# a header-parsing loop filling %attr, then a CHARMAP-parsing loop
# counting fallback ($hfb) vs non-fallback ($nfb) entries.
my $e2u = {};
my $u2e = {};
my $cs;
my %attr;
while (<$fh>)
{
s/#.*$//;
if (/^\s*<(\w+)>\s+"?([^"]*)"?\s*$/i) # " # Grrr
{
$attr{$1} = $2;
}
}
{
}
else
{
}
my $erep;
my $urep;
my $max_el;
my $min_el;
{
#my @byte;
#$attr{'subchar'} =~ /^\s*/cg;
#push(@byte,$1) while $attr{'subchar'} =~ /\G\\x([0-9a-f]+)/icg;
#$erep = join('',map(chr(hex($_)),@byte));
}
my $nfb = 0;
my $hfb = 0;
while (<$fh>)
{
s/#.*$//;
next if /^\s*$/;
}
}
if (@uni)
{
{
$hfb++;
}
else
{
$nfb++;
}
# $fb is fallback flag
# 0 - round trip safe
# 1 - fallback for unicode -> enc
# 2 - skip sub-char mapping
# 3 - fallback enc -> unicode
}
else
{
warn $_;
}
}
{
}
}
sub compile_enc
{
# NOTE(review): this sub is fragmentary in this chunk -- $line, $val,
# $ech and $pages are not declared in the visible code, and the die
# message below has lost its closing quote along with the deleted
# lines. Treat the remaining structure (nested do/while over pages and
# 16-line groups) as an outline only.
my $e2u = {};
my $u2e = {};
my $type;
{
}
# Do the hash lookup once, rather than once per function call. 4% speedup.
my $type_func = $encode_types{$type};
# Save a defined test by setting these to defined values.
my $min_el = ~0; # A very big integer
my $max_el = 0; # Anything must be longer than 0
{
}
my $errors;
my $seen;
# use -Q to silence the seen test. Makefile.PL uses this by default.
do
{
my $ch = 0;
my $i = 16;
do
{
# So why is it 1% faster to leave the my here?
$line =~ s/\r\n$/\n/;
die "$.:${line}Line should be exactly 65 characters long including
# Split line into groups of 4 hex digits, convert groups to ints
# This takes 65.35
# map {hex $_} $line =~ /(....)/g
# This takes 63.75 (2.5% less time)
# unpack "n*", pack "H*", $line
# There's an implicit loop in map. Loops are bad, m'kay. Ops are bad, m'kay
# Doing it as while ($line =~ /(....)/g) took 74.63
{
next if $val == 0xFFFD;
{
# We're doing the test.
# We don't need to read this quickly, so storing it as a scalar,
# rather than 3 (anon array, plus the 2 scalars it holds) saves
# RAM and may make us faster on low RAM systems. [see __END__]
{
$errors++;
}
else
{
}
}
# Passing 2 extra args each time is 3.6% slower!
# Even with having to add $fallback ||= 0 later
}
else
{
# No character at this position
# enter($e2u,$ech,undef,$e2u);
}
$ch++;
}
} while --$i;
} while --$pages;
}
# my ($a,$s,$d,$t,$fb) = @_;
sub enter {
# NOTE(review): most of this sub's code has been lost from this chunk;
# $do_now, $outbytes and $pos are not declared in what is visible.
# state we shift to after this (multibyte) input character defaults to same
# as current state.
# Making sure it is defined seems to be faster than {no warnings;} in
# &process, or passing it in as 0 explicitly.
# XXX $fallback ||= 0;
# Start at the beginning and work forwards through the string to zero.
# effectively we are removing 1 character from the front each time
# but we don't actually edit the string. [this alone seems to be 14% speedup]
# Hence -$pos is the length of the remaining string.
while (1) {
# RAW_NEXT => 0,
# RAW_IN_LEN => 1,
# RAW_OUT_BYTES => 2,
# RAW_FALLBACK => 3,
# to unicode an array would seem to be better, because the pages are dense.
# from unicode can be very sparse, favouring a hash.
# hash using the bytes (all length 1) as keys rather than ord value,
# as it's easier to sort these in &process.
# It's faster to always add $fallback even if it's undef, rather than
# choosing between 3 and 4 element array. (hence why we set it defined
# above)
# When $pos was -1 we were at the last input character.
$do_now->[RAW_OUT_BYTES] = $outbytes;
return;
}
# Tail recursion. The intermediate state may not have a name yet.
}
}
# This is purely for optimisation. It's just &enter hard coded for $fallback
# of 0, using only a 3 entry array ref to save memory for every entry.
sub enter_fb0 {
# Specialised copy of &enter for a fallback flag of 0 (see the comment
# directly above this sub).
# NOTE(review): code lines appear to be missing here too -- $do_now and
# $outbytes are not declared in the visible code.
while (1) {
$do_now->[RAW_OUT_BYTES] = $outbytes;
return;
}
}
}
sub process
{
# Aggregation pass: collapse runs of adjacent RAW "do_now" entries that
# share output length, next state and fallback flag into single AGG
# entries (see the file-header comment and the worked "ABCD" example).
# NOTE(review): heavily fragmentary in this chunk -- $name, $l, $b,
# $in_len, $out_bytes, $next and several lexicals have no visible
# declaration, and the push at the bottom is missing its closing
# bracket's original context.
$name =~ s/\W+/_/g;
my @ent;
$agg_max_in = 0;
# RAW_NEXT => 0,
# RAW_IN_LEN => 1,
# RAW_OUT_BYTES => 2,
# RAW_FALLBACK => 3,
# Now we are converting from raw to aggregate, switch from 1 byte strings
# to numbers
$fallback ||= 0;
if ($l &&
# If this == fails, we're going to reset $agg_max_in below anyway.
$b == ++$agg_max_in &&
# References in numeric context give the pointer as an int.
$agg_in_len == $in_len &&
$agg_out_len == length $out_bytes &&
# && length($l->[AGG_OUT_BYTES]) < 16
) {
# my $i = ord($b)-ord($l->[AGG_MIN_IN]);
# we can aggregate this byte onto the end.
$l->[AGG_MAX_IN] = $b;
$l->[AGG_OUT_BYTES] .= $out_bytes;
} else {
# AGG_MIN_IN => 0,
# AGG_MAX_IN => 1,
# AGG_OUT_BYTES => 2,
# AGG_NEXT => 3,
# AGG_IN_LEN => 4,
# AGG_OUT_LEN => 5,
# AGG_FALLBACK => 6,
# Reset the last thing we saw, plus set 5 lexicals to save some derefs.
# (only gains .6% on euc-jp -- is it worth it?)
push @ent, $l = [$b, $agg_max_in = $b, $out_bytes, $agg_next = $next,
}
} else {
}
}
# encengine.c rules say that last entry must be for 255
push @ent, [1+$agg_max_in, 255,undef,$a,0,0];
}
}
sub addstrings
{
# Record every distinct output-byte string used by the entries of the
# state referred to by $a into the global %strings table (keys only;
# entries with zero output length are skipped), then flag the state as
# done via 'DoneStrings'.
# NOTE(review): $a here is the package variable -- presumably set by a
# caller whose code is not visible in this chunk; the Forward branch
# and the second loop have lost their bodies.
# String tables
foreach my $b (@{$a->{'Entries'}})
{
next unless $b->[AGG_OUT_LEN];
$strings{$b->[AGG_OUT_BYTES]} = undef;
}
if ($a->{'Forward'})
{
}
$a->{'DoneStrings'} = 1;
foreach my $b (@{$a->{'Entries'}})
{
}
}
sub outbigstring
{
# Build the shared "big string" accumulator: each needed string is
# either found inside $string_acc (index), overlapped onto one end of
# it (the OPTIMISER block), or appended; its offset is recorded in
# %strings_in_acc and overlap savings tallied in $subsave.
# NOTE(review): fragmentary in this chunk -- $s and the loop the
# `next` targets are not visible, and the brace structure has lost
# lines; do not take the remaining control flow literally.
# Make the big string in the string accumulator. Longest first, on the hope
# that this makes it more likely that we find the short strings later on.
# Not sure if it helps sorting strings of the same length lexically.
my $index = index $string_acc, $s;
$strings_in_acc{$s} = $index;
} else {
OPTIMISER: {
my $sublength = length $s;
# progressively lop characters off the end, to see if the start of
# the new string overlaps the end of the accumulator.
$subsave += $sublength;
# append the last bit on the end.
}
# or if the end of the new string overlaps the start of the
# accumulator
next unless substr ($string_acc, 0, $sublength)
# well, the last $sublength characters of the accumulator match.
# so as we're prepending to the accumulator, need to shift all our
# existing offsets forwards
$_ += $sublength foreach values %strings_in_acc;
$subsave += $sublength;
$strings_in_acc{$s} = 0;
# append the first bit on the start.
}
}
# Optimiser (if it ran) found nothing, so just going have to tack the
# whole thing on the end.
$strings_in_acc{$s} = length $string_acc;
$string_acc .= $s;
};
}
}
# We have a single long line. Split it at convenient commas.
}
sub findstring {
# Look up a string's offset within the accumulated big string.
# NOTE(review): fragmentary -- $s has no visible declaration (this
# cannot compile under strict as shown) and the computed offset is
# never used; the rest of the sub was evidently lost.
my $offset = $strings_in_acc{$s};
}
sub outtable
{
# Emit one state's table, marking the state 'Done' first.
# NOTE(review): both entry loops have lost their bodies in this chunk
# and $l has no visible declaration; only the skeleton remains.
$a->{'Done'} = 1;
foreach my $b (@{$a->{'Entries'}})
{
}
foreach my $b (@{$a->{'Entries'}})
{
# $end |= 0x80 if $fb; # what the heck was on your mind, Nick? -- Dan
if ($l)
{
}
else
{
}
}
}
sub output_enc
{
# NOTE(review): the body of this sub has been lost from this chunk --
# only an empty bare block remains. Do not infer behaviour from here.
{
}
}
sub decode_U
{
# NOTE(review): only the argument fetch survives in this chunk; the
# decoding logic itself is missing, so the visible code returns the
# argument unchanged.
my $s = shift;
}
my @uname;
sub char_names
{
# NOTE(review): fragmentary -- $s and $e are not declared in the
# visible code and the match that would set $3 is missing. What
# survives walks code points $s..$e, bailing out of the bare block
# once $s reaches 0x10000.
{
my $name = $3;
last if $s >= 0x10000;
for (my $i = $s; $i <= $e; $i++)
{
# print sprintf("U%04X $name\n",$i);
}
}
}
sub output_ucm_page
{
# NOTE(review): most of this sub's body is missing from this chunk;
# $fallback has no visible declaration. Only the fallback-flag
# defaulting survives.
# warn sprintf("Page %x\n",$pre);
# RAW_NEXT => 0,
# RAW_IN_LEN => 1,
# RAW_OUT_BYTES => 2,
# RAW_FALLBACK => 3,
$fallback ||= 0;
}
#foreach my $c (split(//,$out_bytes)) {
# $s .= sprintf "\\x%02X",ord($c);
#}
# 9.5% faster changing that loop to this:
} else {
}
}
}
sub output_ucm
{
# NOTE(review): the body of this sub is almost entirely missing from
# this chunk; only empty blocks and the @cmap declaration remain.
{
}
{
}
{
{
}
}
my @cmap;
{
}
}
);
sub find_e2x{
# NOTE(review): the body of this sub is missing from this chunk; the
# orphaned statements following it (-f test, $_E2X assignment/return)
# presumably belonged inside it -- confirm against the full file.
}
sub {
-f _ or return;
}
return;
$_E2X = $d;
# warn "$_E2X => ", scalar localtime($e2x_dir{$d});
return $_E2X;
}
}
sub make_makefile_pl
{
# NOTE(review): fragmentary -- $_Enc2xs and $_Name have no visible
# `our` declarations in this chunk (the comment below suggests they
# are package variables used for template expansion), and the code
# that would consume them before `exit` is missing.
# our used for variable expansion
$_Enc2xs = $0;
$_Name = shift;
exit;
}
);
{
# our used for variable expanstion
eval { require "Encode/$f"; };
}
}
}
$_ModLines .=
}
1);
exit;
}
sub _mkversion{
# NOTE(review): body missing from this chunk.
}
sub _print_expand{
# NOTE(review): all but the final return has been lost from this
# chunk; the orphaned header-handling/print lines below this sub
# likely belonged here -- confirm against the full file.
return;
}
}
my $asis = 0;
if (/^#### END_OF_HEADER/){
$asis = 1; next;
}
print $out $_;
}
}
enc2xs -C
=over 4
=item 0.
in I<my.ucm>. C<$> is a shell prompt.
$ ls -F
=item 1.
Issue a command as follows;
$ enc2xs -M My my.ucm
generating Makefile.PL
generating My.pm
generating README
generating Changes
Now take a look at your current directory. It should look like this.
$ ls -F
The following files were created.
Makefile.PL - MakeMaker script
My.pm - Encode submodule
=over 4
=item 1.1.
If you want *.ucm installed together with the modules, do as follows;
$ mkdir Encode
$ mv *.ucm Encode
$ enc2xs -M My Encode/*ucm
=back
=item 2.
=item 3.
=item 4.
$ make
-o encode_t.c -f encode_t.fnm
....
$
=item 5.
You can "make install" already but you should test first.
$ make test
All tests successful.
Files=1, Tests=2, 0 wallclock secs
( 0.09 cusr + 0.01 csys = 0.09 CPU)
=item 6.
If you are content with the test result, just "make install"
=item 7.
enc2xs -C
to update Encode::ConfigLocal, a module that controls local settings.
After that, "use Encode;" is enough to load your encodings on demand.
=back
=head1 The Unicode Character Map
Encode uses the Unicode Character Map (UCM) format for source character
this is the recommended format for Encode now.
A UCM file looks like this.
#
# Comments
#
<code_set_name> "US-ascii" # Required
<code_set_alias> "ascii" # Optional
<mb_cur_min> 1 # Required; usually 1
<subchar> \x3F # Substitution char
#
CHARMAP
<U0000> \x00 |0 # <control>
<U0001> \x01 |0 # <control>
<U0002> \x02 |0 # <control>
....
<U007C> \x7C |0 # VERTICAL LINE
<U007D> \x7D |0 # RIGHT CURLY BRACKET
<U007E> \x7E |0 # TILDE
<U007F> \x7F |0 # <control>
END CHARMAP
=over 4
=item *
Anything that follows C<#> is treated as a comment.
=item *
The header section continues until a line containing the word
CHARMAP. This section has a form of I<E<lt>keywordE<gt> value>, one
pair per line. Strings used as values must be quoted. Barewords are
treated as numbers. I<\xXX> represents a byte.
Most of the keywords are self-explanatory. I<subchar> means
substitution character, not subcharacter. When you decode a Unicode
sequence to this encoding but no matching character is found, the byte
sequence defined here will be used. For most cases, the value here is
\x3F; in ASCII, this is a question mark.
=item *
CHARMAP starts the character map section. Each line has a form as
follows:
<UXXXX> \xXX.. |0 # comment
^ ^ ^
| | +- Fallback flag
| +-------- Encoded byte sequence
+-------------- Unicode Character ID in hex
The format is roughly the same as a header section except for the
fallback flag: | followed by 0..3. The meaning of the possible
values is as follows:
=over 4
=item |0
Round trip safe. A character decoded to Unicode encodes back to the
same byte sequence. Most characters have this flag.
=item |1
Fallback for unicode -> encoding. When seen, enc2xs adds this
character for the encode map only.
=item |2
Skip sub-char mapping should there be no code point.
=item |3
Fallback for encoding -> unicode. When seen, enc2xs adds this
character for the decode map only.
=back
=item *
And finally, END CHARMAP ends the section.
=back
When you are manually creating a UCM file, you should copy ascii.ucm
or an existing encoding which is close to yours, rather than write
your own from scratch.
When you do so, make sure you leave at least B<U0000> to B<U0020> as
is, unless your environment is EBCDIC.
B<CAVEAT>: not all features in UCM are implemented. For example,
icu:state is not used. Because of that, you need to write a perl
module if you want to support algorithmical encodings, notably
the ISO-2022 series. Such modules include L<Encode::JP::2022_JP>,
L<Encode::KR::2022_KR>, and L<Encode::TW::HZ>.
=head2 Coping with duplicate mappings
When you are coding a map, you SHOULD make your mappings round-trip safe.
That is, C<encode('your-encoding', decode('your-encoding', $data)) eq
$data> stands for all characters that are marked as C<|0>. Here is
how to make sure:
=over 4
=item *
Sort your map in Unicode order.
=item *
When you have a duplicate entry, mark either one with '|1' or '|3'.
=item *
And make sure the '|1' or '|3' entry FOLLOWS the '|0' entry.
=back
Here is an example from big5-eten.
<U2550> \xF9\xF9 |0
<U2550> \xA2\xA4 |3
Internally Encoding -> Unicode and Unicode -> Encoding Map looks like
this;
E to U U to E
--------------------------------------
\xF9\xF9 => U2550 U2550 => \xF9\xF9
\xA2\xA4 => U2550
So it is round-trip safe for \xF9\xF9. But if the line above is upside
down, here is what happens.
E to U U to E
--------------------------------------
\xA2\xA4 => U2550 U2550 => \xF9\xF9
(\xF9\xF9 => U2550 is now overwritten!)
The Encode package comes with F<ucmlint>, a crude but sufficient
utility to check the integrity of a UCM file. Check under the Encode source directory.
=head1 Bookmarks
=over 4
=item *
ICU Home Page
=item *
ICU Character Mapping Tables
=item *
ICU:Conversion Data
=back
=head1 SEE ALSO
L<Encode>,
L<perlmod>,
L<perlpod>
=cut
# -Q to disable the duplicate codepoint test
# -S make mapping errors fatal
# -q to remove comments written to output files
# -O to enable the (brute force) substring optimiser
# -f <inlist> to give a file with a list of input files (else use the args)
# -n <name> to name the encoding (else use the basename of the input file).
Swapping is bad, m'kay :-)