# ====================================================================
# Written by David S. Miller <davem@davemloft.net> and Andy Polyakov
# <appro@openssl.org>. The module is licensed under 2-clause BSD
# license. November 2012. All rights reserved.
# ====================================================================
######################################################################
# Montgomery squaring-n-multiplication module for SPARC T4.
#
# The module consists of three parts:
#
# 1) collection of "single-op" subroutines that perform a single
#    operation, Montgomery squaring or multiplication, on 512-,
#    1024-, 1536- and 2048-bit operands;
# 2) collection of "multi-op" subroutines that perform 5 squaring and
#    1 multiplication operations on operands of the above lengths;
# 3) fall-back and helper VIS3 subroutines.
#
# RSA sign is dominated by the multi-op subroutines, while RSA verify
# and DSA are dominated by the single-op ones. A special note about
# the 4096-bit RSA verify result: the operands are too long for the
# dedicated hardware, so the operation is handled by the VIS3 code,
# which is why you don't see any improvement there. It surely could
# be improved [by deploying the 'mpmul' instruction], maybe in the
# future...
#
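# Below, for reference, is a minimal big-number model (ours, purely
# illustrative, never called by the generator) of the operation every
# subroutine in this module computes, the Montgomery product
# a*b*2^(-64*num) mod n; Math::BigInt stands in for the hardware:

use Math::BigInt;

sub mont_mul_ref {
	my ($a,$b,$n,$num) = @_;	# Math::BigInt values, length in 64-bit words
	my $R  = Math::BigInt->bone()->blsft(64*$num);	# R = 2^(64*num)
	my $t  = $a->copy()->bmul($b);			# t = a*b
	my $ni = $n->copy()->bmodinv($R);		# n^(-1) mod R
	# choose m = -t*n^(-1) mod R, so that t + m*n is divisible by R
	my $m  = $R->copy()->bsub($t->copy()->bmul($ni)->bmod($R))->bmod($R);
	$t->badd($m->bmul($n))->brsft(64*$num);		# (t + m*n)/R
	$t->bsub($n) if ($t->bcmp($n) >= 0);		# final conditional subtraction
	return $t;	# == a*b*2^(-64*num) mod n
}
#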
# Performance improvement.
#
# 64-bit process, VIS3:
#                   sign    verify    sign/s verify/s
# rsa 1024 bits 0.000628s 0.000028s   1592.4  35434.4
# rsa 2048 bits 0.003282s 0.000106s    304.7   9438.3
# rsa 4096 bits 0.025866s 0.000340s     38.7   2940.9
# dsa 1024 bits 0.000301s 0.000332s   3323.7   3013.9
# dsa 2048 bits 0.001056s 0.001233s    946.9    810.8
#
# 64-bit process, this module:
#                   sign    verify    sign/s verify/s
# rsa 1024 bits 0.000256s 0.000016s   3904.4  61411.9
# rsa 2048 bits 0.000946s 0.000029s   1056.8  34292.7
# rsa 4096 bits 0.005061s 0.000340s    197.6   2940.5
# dsa 1024 bits 0.000176s 0.000195s   5674.7   5130.5
# dsa 2048 bits 0.000296s 0.000354s   3383.2   2827.6
#
######################################################################
# 32-bit process, VIS3:
#                   sign    verify    sign/s verify/s
# rsa 1024 bits 0.000665s 0.000028s   1504.8  35233.3
# rsa 2048 bits 0.003349s 0.000106s    298.6   9433.4
# rsa 4096 bits 0.025959s 0.000341s     38.5   2934.8
# dsa 1024 bits 0.000320s 0.000341s   3123.3   2929.6
# dsa 2048 bits 0.001101s 0.001260s    908.2    793.4
#
# 32-bit process, this module:
#                   sign    verify    sign/s verify/s
# rsa 1024 bits 0.000301s 0.000017s   3317.1  60240.0
# rsa 2048 bits 0.001034s 0.000030s    966.9  33812.7
# rsa 4096 bits 0.005244s 0.000341s    190.7   2935.4
# dsa 1024 bits 0.000201s 0.000205s   4976.1   4879.2
# dsa 2048 bits 0.000328s 0.000360s   3051.1   2774.2
#
# 32-bit code is prone to performance degradation as the interrupt
# rate dispatched to the CPU executing the code grows. This is
# because in the standard handling of an interrupt in 32-bit process
# context the upper halves of most integer registers used as input or
# output are zeroed. This renders the result invalid, and the
# operation has to be re-run. If the CPU is "bothered" with timer
# interrupts only, the penalty is hardly measurable. But in order to
# mitigate this problem for higher interrupt rates, contemporary
# Linux kernels recognize the biased stack even in 32-bit process
# context and preserve full register contents. See the corresponding
# Linux kernel commit for details.
require "sparcv9_modes.pl";
#include "sparc_arch.h"
#include <openssl/fipssyms.h>
#ifdef __arch64__
#endif
#ifdef __PIC__
#endif
########################################################################
# Register layout for mont[mul|sqr] instructions.
# For details see the "Oracle SPARC Architecture 2011" manual.
#
my @R=map("%f".2*$_,(0..11,30,31,12..29));
my @N=(map("%l$_",(0..7)),map("%o$_",(0..5))); @N=(@N,@N,@N[0..3]);
my @A=(@N[0..13],@R[14..31]);
my @B=(map("%i$_",(0..5)),map("%l$_",(0..7))); @B=(@B,@B,map("%o$_",(0..3)));
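# As a quick sanity check, the operand-to-register assignment above can
# be dumped with a throwaway helper like this (ours, for illustration
# only; nothing in the generator calls it):
sub dump_register_layout {
	for my $i (0..31) {
		printf("%2d:\tA=%s\tB=%s\tN=%s\tR=%s\n",
		       $i,$A[$i],$B[$i],$N[$i],$R[$i]);
	}
}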
########################################################################
# int bn_mul_mont_t4_$NUM(u64 *rp,const u64 *ap,const u64 *bp,
#                         const u64 *np,const BN_ULONG *n0);
#
sub generate_bn_mul_mont_t4() {
my $NUM=shift;
my ($rp,$ap,$bp,$np,$sentinel)=map("%g$_",(1..5));
my $i;

$code.=<<___;
.globl	bn_mul_mont_t4_$NUM
.align	32
bn_mul_mont_t4_$NUM:
#ifdef	__arch64__
#elif defined(SPARCV9_64BIT_STACK)
#else
#endif
#ifndef	__arch64__
#endif
___

# load ap[$NUM] ########################################################
for($i=0; $i<14 && $i<$NUM; $i++) {	# ap[0..13] into integer registers;
my $lo=$i<13?@A[$i+1]:"%o7";		# each 64-bit word is assembled
$code.=<<___;				# from two 32-bit halves
	ld	[$ap+$i*8+0],$lo
	ld	[$ap+$i*8+4],@A[$i]
	sllx	@A[$i],32,@A[$i]
	or	$lo,@A[$i],@A[$i]
___
}
for(; $i<$NUM; $i++) {			# remaining ap[] words belong in
}					# floating-point registers
# load np[$NUM] ########################################################
for($i=0; $i<14 && $i<$NUM; $i++) {	# np[0..13]
my $lo=$i<13?@N[$i+1]:"%o7";
$code.=<<___;
	ld	[$np+$i*8+0],$lo
	ld	[$np+$i*8+4],@N[$i]
	sllx	@N[$i],32,@N[$i]
	or	$lo,@N[$i],@N[$i]
___
}
for(; $i<28 && $i<$NUM; $i++) {		# np[14..27], integer registers reused
my $lo=$i<27?@N[$i+1]:"%o7";
$code.=<<___;
	ld	[$np+$i*8+0],$lo
	ld	[$np+$i*8+4],@N[$i]
	sllx	@N[$i],32,@N[$i]
	or	$lo,@N[$i],@N[$i]
___
}
for(; $i<$NUM; $i++) {			# np[28..]
my $lo=$i<$NUM-1?@N[$i+1]:"%o7";
$code.=<<___;
	ld	[$np+$i*8+0],$lo
	ld	[$np+$i*8+4],@N[$i]
	sllx	@N[$i],32,@N[$i]
	or	$lo,@N[$i],@N[$i]
___
}
# load bp[$NUM] ########################################################
for($i=0; $i<14 && $i<$NUM; $i++) {	# bp[0..13]
my $lo=$i<13?@B[$i+1]:"%o7";
$code.=<<___;
	ld	[$bp+$i*8+0],$lo
	ld	[$bp+$i*8+4],@B[$i]
	sllx	@B[$i],32,@B[$i]
	or	$lo,@B[$i],@B[$i]
___
}
for(; $i<$NUM; $i++) {			# bp[14..], integer registers reused
my $lo=$i<$NUM-1?@B[$i+1]:"%o7";
$code.=<<___;
	ld	[$bp+$i*8+0],$lo
	ld	[$bp+$i*8+4],@B[$i]
	sllx	@B[$i],32,@B[$i]
	or	$lo,@B[$i],@B[$i]
___
}
# magic ################################################################
$code.=<<___;
	.word	0x81b02920+$NUM-1	! montmul	$NUM-1
#ifndef	__arch64__
#endif
#ifdef	__arch64__
#else
#endif
___
# save tp[$NUM] ########################################################
for($i=0; $i<14 && $i<$NUM; $i++) {	# stash results in FP registers
$code.=<<___;				# across register-window restores
	movxtod	@A[$i],@R[$i]
___
}
$code.=<<___;
#ifdef	__arch64__
#else
#endif
___
for($i=0; $i<12 && $i<$NUM; $i++) {
@R[$i] =~ /%f([0-9]+)/;			# recover register number; the
my $lo = "%f".($1+1);			# odd single holds the low half
$code.=<<___;
	st	$lo,[$rp+$i*8+0]
	st	@R[$i],[$rp+$i*8+4]
___
}
for(; $i<$NUM; $i++) {
}
$code.=<<___;
	ret
	restore
.align	32
___
}
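# single-op flavours for 512-, 1024-, 1536- and 2048-bit operands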
for ($i=8;$i<=32;$i+=8) {
&generate_bn_mul_mont_t4($i);
}
########################################################################
# helper subroutines for gathering bp[] operands from the power table
#
sub load_ccr {
}
sub load_b_pair {
}
sub load_b {
}
########################################################################
# int bn_pwr5_mont_t4_$NUM(u64 *tp,const u64 *np,const BN_ULONG *n0,
#                          const u64 *pwrtbl,int pwr,int stride);
#
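# In terms of the mont_mul_ref model above, one bn_pwr5_mont_t4_$NUM
# call amounts to the following (a sketch, ours; the gathered pwrtbl
# entry is assumed to already be a Montgomery residue):
sub pwr5_ref {
	my ($tp,$gathered,$n,$num) = @_;
	$tp = mont_mul_ref($tp,$tp,$n,$num) for (1..5);	# 5 squarings
	return mont_mul_ref($tp,$gathered,$n,$num);	# 1 multiplication
}
#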
sub generate_bn_pwr5_mont_t4() {
my $NUM=shift;
my $i;

$code.=<<___;
.globl	bn_pwr5_mont_t4_$NUM
.align	32
bn_pwr5_mont_t4_$NUM:
#ifdef	__arch64__
#elif defined(SPARCV9_64BIT_STACK)
#else
#endif
#ifndef	__arch64__
#endif
___
# load tp[$NUM] ########################################################
# (words are assembled from 32-bit halves as in bn_mul_mont_t4 above)
for($i=0; $i<14 && $i<$NUM; $i++) {
}
for(; $i<$NUM; $i++) {
}
# load np[$NUM] ########################################################
for($i=0; $i<14 && $i<$NUM; $i++) {
}
for(; $i<28 && $i<$NUM; $i++) {
}
for(; $i<$NUM; $i++) {
}
# load pwrtbl[pwr] #####################################################
$code.=<<___;
.align	16
___
for($i=0; $i<14 && $i<$NUM; $i+=2) {	# gather table entry word pairs
}
for(; $i<$NUM; $i+=2) {
}
# magic ################################################################
for($i=0; $i<5; $i++) {			# five Montgomery squarings...
$code.=<<___;
	.word	0x81b02940+$NUM-1	! montsqr	$NUM-1
#ifndef	__arch64__
#endif
___
}
$code.=<<___;				# ...followed by one multiplication
	.word	0x81b02920+$NUM-1	! montmul	$NUM-1
#ifndef	__arch64__
#endif
#ifdef	__arch64__
#else
#endif
___
# save tp[$NUM] ########################################################
for($i=0; $i<14 && $i<$NUM; $i++) {	# stash results in FP registers
$code.=<<___;				# across register-window restores
	movxtod	@A[$i],@R[$i]
___
}
$code.=<<___;
#ifdef	__arch64__
#else
#endif
___
for($i=0; $i<$NUM; $i++) {
}
}
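# multi-op flavours for the same 512- to 2048-bit operand lengths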
for ($i=8;$i<=32;$i+=8) {
&generate_bn_pwr5_mont_t4($i);
}
{
########################################################################
# Fall-back subroutines
#
# copy of bn_mul_mont_vis3 adjusted for vectors of 64-bit values
#
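# (these also serve the operand lengths the dedicated hardware cannot
# handle, e.g. the 4096-bit case noted at the top of this file)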
(map("%g$_",(1..5)),map("%o$_",(0..5,7)));
# int bn_mul_mont(
#	u64 *rp,const u64 *ap,const u64 *bp,const u64 *np,
#	const BN_ULONG *n0,int num);
$code.=<<___;
.globl	bn_mul_mont_t4
.align	32
bn_mul_mont_t4:
___
# +-------------------------------+<----- %sp
# .                               .
# +-------------------------------+<----- aligned at 64 bytes
# | __int64 tmp[0]                |
# +-------------------------------+
# .                               .
# .                               .
# +-------------------------------+<----- aligned at 64 bytes
# .                               .
$code.=<<___;
.align	16
.L1st:
!.L1st
.align	16
.Louter:
.align	16
.Linner:
!.Linner
	sub	$i, 8, $i
.align	16
.Lsub:
.align	16
___
# int bn_mul_mont_gather5(
#	u64 *rp,const u64 *ap,const u64 *pwrtbl,const u64 *np,
#	const BN_ULONG *n0,int num,int power);
$code.=<<___;
.globl	bn_mul_mont_gather5_t4
.align	32
bn_mul_mont_gather5_t4:
___
# +-------------------------------+<----- %sp
# .                               .
# +-------------------------------+<----- aligned at 64 bytes
# | __int64 tmp[0]                |
# +-------------------------------+
# .                               .
# .                               .
# +-------------------------------+<----- aligned at 64 bytes
# .                               .
$code.=<<___;
.align	16
.L1st_g5:
!.L1st_g5
.align	16
.Louter_g5:
.align	16
.Linner_g5:
!.Linner_g5
	sub	$i, 8, $i
.align	16
.Lsub_g5:
.align	16
___
}
# helper subroutines: bn_flip_t4, bn_flip_n_scatter5_t4, bn_gather5_t4
$code.=<<___;
.align	32
.align	32
.align	32
.asciz	"Montgomery Multiplication for SPARC T4, David S. Miller, Andy Polyakov"
.align	4
___
&emit_assembler();
close STDOUT;