#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives modest
# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
# than twice as fast. The most common rsa1024 sign is improved by a
# respectable 50%. It remains to be seen if loop unrolling and a
# dedicated squaring routine can provide further improvement...

# July 2011.
#
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule inner loops in such a manner that they
# "fall through" for input lengths of 8, which is critical for
# 1024-bit RSA *sign*. Average performance improvement in comparison
# to the *initial* 2005 version of this module is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
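
# The script is driven the usual perlasm way: translation flavour
# first (elf, macosx, mingw64, nasm, masm), output file name second,
# e.g.
#
#	perl x86_64-mont.pl elf x86_64-mont.s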

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;

# int bn_mul_mont(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num);
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

$code=<<___;
.text
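
# bn_mul_mont is the common entry point: lengths that are not a
# multiple of 4, or are shorter than 8 limbs, take the generic 1x
# loop; squarings, recognized by both input pointers being equal, go
# to bn_sqr4x_mont; everything else goes to the 4x unrolled
# bn_mul4x_mont.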

.globl	bn_mul_mont
.type	bn_mul_mont,\@function,6
.align	16
bn_mul_mont:
	test	\$3,${num}d
	jnz	.Lmul_enter
	cmp	\$8,${num}d
	jb	.Lmul_enter
	cmp	$ap,$bp
	jne	.Lmul4x_enter
	jmp	.Lsqr4x_enter

.align	16
.Lmul_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	2($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul_body:
	mov	$bp,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$lo0
	mov	($np),%rax

	imulq	$lo0,$m1		# "tp[0]"*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.L1st_enter

.align	16
.L1st:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$lo0,$hi0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.L1st_enter:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	lea	1($j),$j		# j++
	mov	%rdx,$lo0

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.L1st

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1
	mov	$lo0,$hi0

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	jmp	.Louter
.align	16
.Louter:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	$n0,$m1
	mov	(%rsp),$lo0
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$lo0,$m1		# tp[0]*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	8(%rsp),$lo0		# tp[1]
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.Linner_enter

.align	16
.Linner:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.Linner_enter:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	%rdx,$hi0
	adc	\$0,$hi0
	lea	1($j),$j		# j++

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.Linner

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	add	$lo0,$hi1		# pull upmost overflow bit
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	cmp	$num,$i
	jl	.Louter

	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
	mov	$num,$j			# j=num
	jmp	.Lsub
.align	16
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	lea	1($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub

	sbb	\$0,%rax		# handle upmost overflow bit
	xor	$i,$i
	and	%rax,$ap
	not	%rax
	mov	$rp,$np
	and	%rax,$np
	mov	$num,$j			# j=num
	or	$np,$ap			# ap=borrow?tp:rp
.align	16
.Lcopy:					# copy or in-place refresh
	mov	($ap,$i,8),%rax
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
	lea	1($i),$i
	sub	\$1,$j
	jnz	.Lcopy

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul_epilogue:
	ret
.size	bn_mul_mont,.-bn_mul_mont
___
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
$code.=<<___;
.type	bn_mul4x_mont,\@function,6
.align	16
bn_mul4x_mont:
.Lmul4x_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	4($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul4x_body:
	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$A[0]
	mov	($np),%rax

	imulq	$A[0],$m1		# "tp[0]"*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)
	mov	%rdx,$N[0]
	jmp	.L1st4x
.align	16
.L1st4x:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	lea	4($j),$j		# j+=4
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jl	.L1st4x

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
.align	4
.Louter4x:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	(%rsp),$A[0]
	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$A[0],$m1		# tp[0]*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# "$N[0]", discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	add	8(%rsp),$A[1]		# +tp[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)		# tp[j-1]
	mov	%rdx,$N[0]
	jmp	.Linner4x
.align	16
.Linner4x:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	add	8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	4($j),$j		# j+=4
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jl	.Linner4x

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	1($i),$i		# i++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	cmp	$num,$i
	jl	.Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
	mov	16(%rsp,$num,8),$rp	# restore $rp
	mov	0(%rsp),@ri[0]		# tp[0]
	pxor	%xmm0,%xmm0
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$num		# num/=4
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!

	sub	0($np),@ri[0]
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	sbb	8($np),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsub4x
.align	16
.Lsub4x:
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	lea	4($i),$i		# i+=4
	dec	$j			# doesn't affect CF!
	jnz	.Lsub4x

	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	and	@ri[0],$ap
	not	@ri[0]
	mov	$rp,$np
	and	@ri[0],$np
	lea	-1($num),$j
	or	$np,$ap			# ap=borrow?tp:rp

	movdqu	($ap),%xmm1
	movdqa	%xmm0,(%rsp)
	movdqu	%xmm1,($rp)
	jmp	.Lcopy4x
.align	16
.Lcopy4x:					# copy or in-place refresh
	movdqu	16($ap,$i),%xmm2
	movdqu	32($ap,$i),%xmm1
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
	movdqa	%xmm0,32(%rsp,$i)
	movdqu	%xmm1,32($rp,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lcopy4x

	shl	\$2,$num
	movdqu	16($ap,$i),%xmm2
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
___
}
$code.=<<___;
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul4x_epilogue:
	ret
.size	bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
{{{
######################################################################
# void bn_sqr4x_mont(
my $rptr="%rdi";	# const BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 4 and
			# not less than 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

$code.=<<___;
.type	bn_sqr4x_mont,\@function,6
.align	16
bn_sqr4x_mont:
.Lsqr4x_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	shl	\$3,${num}d		# convert $num to bytes
	xor	%r10,%r10
	mov	%rsp,%r11		# put aside %rsp
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-72(%rsp,%r10,2),%rsp	# alloca(frame+2*$num)
	and	\$-1024,%rsp		# minimize TLB usage
	##############################################################
	# Stack layout
	#
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	# +32	saved $rptr
	# +40	saved $nptr
	# +48	saved *n0
	# +56	saved %rsp
	# +64	t[2*$num]
	#
	mov	$rptr,32(%rsp)		# save $rptr
	mov	$nptr,40(%rsp)
	mov	$n0,  48(%rsp)
	mov	%r11, 56(%rsp)		# save original %rsp
.Lsqr4x_body:
	##############################################################
	# Squaring part:
	#
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	#
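	# For example, with num==4 the products collected by a) and the
	# squares folded in by b) combine as
	#
	#   a^2 = 2*(a[1]*a[0] + a[2]*a[0] + a[3]*a[0]
	#          + a[2]*a[1] + a[3]*a[1] + a[3]*a[2])
	#       + a[0]^2 + a[1]^2 + a[2]^2 + a[3]^2
	#
	# with the partial product a[i]*a[j] accumulated at limb offset
	# i+j of t[].
	#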
	lea	32(%r10),$i		# $i=-($num-32)
	lea	($aptr,$num),$aptr	# end of a[] buffer, ($aptr,$i)=&ap[2]

	mov	$num,$j			# $j=$num

					# comments apply to $num==8 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	64(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	%rax,$a1

	mul	$a0			# a[1]*a[0]
	mov	%rax,$A0[0]		# a[1]*a[0]
	 mov	$ai,%rax		# a[2]
	mov	%rdx,$A0[1]
	mov	$A0[0],-24($tptr,$i)	# t[1]

	xor	$A0[0],$A0[0]
	mul	$a0			# a[2]*a[0]
	add	%rax,$A0[1]
	 mov	$ai,%rax
	adc	%rdx,$A0[0]
	mov	$A0[1],-16($tptr,$i)	# t[2]

	lea	-16($i),$j		# j=-16


	 mov	8($aptr,$j),$ai		# a[3]
	mul	$a1			# a[2]*a[1]
	mov	%rax,$A1[0]		# a[2]*a[1]+t[3]
	 mov	$ai,%rax
	mov	%rdx,$A1[1]

	xor	$A0[1],$A0[1]
	add	$A1[0],$A0[0]
	 lea	16($j),$j
	adc	\$0,$A0[1]
	mul	$a0			# a[3]*a[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	 mov	$ai,%rax
	adc	%rdx,$A0[1]
	mov	$A0[0],-8($tptr,$j)	# t[3]
	jmp	.Lsqr4x_1st

.align	16
.Lsqr4x_1st:
	 mov	($aptr,$j),$ai		# a[4]
	xor	$A1[0],$A1[0]
	mul	$a1			# a[3]*a[1]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	 mov	$ai,%rax
	adc	%rdx,$A1[0]

	xor	$A0[0],$A0[0]
	add	$A1[1],$A0[1]
	adc	\$0,$A0[0]
	mul	$a0			# a[4]*a[0]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	 mov	$ai,%rax		# a[4]
	adc	%rdx,$A0[0]
	mov	$A0[1],($tptr,$j)	# t[4]


	 mov	8($aptr,$j),$ai		# a[5]
	xor	$A1[1],$A1[1]
	mul	$a1			# a[4]*a[1]
	add	%rax,$A1[0]		# a[4]*a[1]+t[5]
	 mov	$ai,%rax
	adc	%rdx,$A1[1]

	xor	$A0[1],$A0[1]
	add	$A1[0],$A0[0]
	adc	\$0,$A0[1]
	mul	$a0			# a[5]*a[0]
	add	%rax,$A0[0]		# a[5]*a[0]+a[4]*a[1]+t[5]
	 mov	$ai,%rax
	adc	%rdx,$A0[1]
	mov	$A0[0],8($tptr,$j)	# t[5]

	 mov	16($aptr,$j),$ai	# a[6]
	xor	$A1[0],$A1[0]
	mul	$a1			# a[5]*a[1]
	add	%rax,$A1[1]		# a[5]*a[1]+t[6]
	 mov	$ai,%rax
	adc	%rdx,$A1[0]

	xor	$A0[0],$A0[0]
	add	$A1[1],$A0[1]
	adc	\$0,$A0[0]
	mul	$a0			# a[6]*a[0]
	add	%rax,$A0[1]		# a[6]*a[0]+a[5]*a[1]+t[6]
	 mov	$ai,%rax		# a[6]
	adc	%rdx,$A0[0]
	mov	$A0[1],16($tptr,$j)	# t[6]


	 mov	24($aptr,$j),$ai	# a[7]
	xor	$A1[1],$A1[1]
	mul	$a1			# a[6]*a[1]
	add	%rax,$A1[0]		# a[6]*a[1]+t[7]
	 mov	$ai,%rax
	adc	%rdx,$A1[1]

	xor	$A0[1],$A0[1]
	add	$A1[0],$A0[0]
	 lea	32($j),$j
	adc	\$0,$A0[1]
	mul	$a0			# a[7]*a[0]
	add	%rax,$A0[0]		# a[7]*a[0]+a[6]*a[1]+t[7]
	 mov	$ai,%rax
	adc	%rdx,$A0[1]
	mov	$A0[0],-8($tptr,$j)	# t[7]

	cmp	\$0,$j
	jne	.Lsqr4x_1st

	xor	$A1[0],$A1[0]
	add	$A0[1],$A1[1]
	adc	\$0,$A1[0]
	mul	$a1			# a[7]*a[1]
	add	%rax,$A1[1]
	adc	%rdx,$A1[0]

	mov	$A1[1],($tptr)		# t[8]
	lea	16($i),$i
	mov	$A1[0],8($tptr)		# t[9]
	jmp	.Lsqr4x_outer

.align	16
.Lsqr4x_outer:				# comments apply to $num==6 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	64(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	%rax,$a1

	mov	-24($tptr,$i),$A0[0]	# t[1]
	xor	$A0[1],$A0[1]
	mul	$a0			# a[1]*a[0]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1]
	 mov	$ai,%rax		# a[2]
	adc	%rdx,$A0[1]
	mov	$A0[0],-24($tptr,$i)	# t[1]

	xor	$A0[0],$A0[0]
	add	-16($tptr,$i),$A0[1]	# a[2]*a[0]+t[2]
	adc	\$0,$A0[0]
	mul	$a0			# a[2]*a[0]
	add	%rax,$A0[1]
	 mov	$ai,%rax
	adc	%rdx,$A0[0]
	mov	$A0[1],-16($tptr,$i)	# t[2]

	lea	-16($i),$j		# j=-16
	xor	$A1[0],$A1[0]


	 mov	8($aptr,$j),$ai		# a[3]
	xor	$A1[1],$A1[1]
	add	8($tptr,$j),$A1[0]
	adc	\$0,$A1[1]
	mul	$a1			# a[2]*a[1]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3]
	 mov	$ai,%rax
	adc	%rdx,$A1[1]

	xor	$A0[1],$A0[1]
	add	$A1[0],$A0[0]
	adc	\$0,$A0[1]
	mul	$a0			# a[3]*a[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	 mov	$ai,%rax
	adc	%rdx,$A0[1]
	mov	$A0[0],8($tptr,$j)	# t[3]

	lea	16($j),$j
	jmp	.Lsqr4x_inner

.align	16
.Lsqr4x_inner:
	 mov	($aptr,$j),$ai		# a[4]
	xor	$A1[0],$A1[0]
	add	($tptr,$j),$A1[1]
	adc	\$0,$A1[0]
	mul	$a1			# a[3]*a[1]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	 mov	$ai,%rax
	adc	%rdx,$A1[0]

	xor	$A0[0],$A0[0]
	add	$A1[1],$A0[1]
	adc	\$0,$A0[0]
	mul	$a0			# a[4]*a[0]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	 mov	$ai,%rax		# a[4]
	adc	%rdx,$A0[0]
	mov	$A0[1],($tptr,$j)	# t[4]

	 mov	8($aptr,$j),$ai		# a[5]
	xor	$A1[1],$A1[1]
	add	8($tptr,$j),$A1[0]
	adc	\$0,$A1[1]
	mul	$a1			# a[4]*a[1]
	add	%rax,$A1[0]		# a[4]*a[1]+t[5]
	 mov	$ai,%rax
	adc	%rdx,$A1[1]

	xor	$A0[1],$A0[1]
	add	$A1[0],$A0[0]
	lea	16($j),$j		# j++
	adc	\$0,$A0[1]
	mul	$a0			# a[5]*a[0]
	add	%rax,$A0[0]		# a[5]*a[0]+a[4]*a[1]+t[5]
	 mov	$ai,%rax
	adc	%rdx,$A0[1]
	mov	$A0[0],-8($tptr,$j)	# t[5], "preloaded t[1]" below

	cmp	\$0,$j
	jne	.Lsqr4x_inner

	xor	$A1[0],$A1[0]
	add	$A0[1],$A1[1]
	adc	\$0,$A1[0]
	mul	$a1			# a[5]*a[1]
	add	%rax,$A1[1]
	adc	%rdx,$A1[0]

	mov	$A1[1],($tptr)		# t[6], "preloaded t[2]" below
	mov	$A1[0],8($tptr)		# t[7], "preloaded t[3]" below

	add	\$16,$i
	jnz	.Lsqr4x_outer

					# comments apply to $num==4 case
	mov	-32($aptr),$a0		# a[0]
	lea	64(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr),%rax		# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr),$ai		# a[2]
	mov	%rax,$a1

	xor	$A0[1],$A0[1]
	mul	$a0			# a[1]*a[0]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1], preloaded t[1]
	 mov	$ai,%rax		# a[2]
	adc	%rdx,$A0[1]
	mov	$A0[0],-24($tptr)	# t[1]

	xor	$A0[0],$A0[0]
	add	$A1[1],$A0[1]		# a[2]*a[0]+t[2], preloaded t[2]
	adc	\$0,$A0[0]
	mul	$a0			# a[2]*a[0]
	add	%rax,$A0[1]
	 mov	$ai,%rax
	adc	%rdx,$A0[0]
	mov	$A0[1],-16($tptr)	# t[2]

	 mov	-8($aptr),$ai		# a[3]
	mul	$a1			# a[2]*a[1]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3], preloaded t[3]
	 mov	$ai,%rax
	adc	\$0,%rdx

	xor	$A0[1],$A0[1]
	add	$A1[0],$A0[0]
	 mov	%rdx,$A1[1]
	adc	\$0,$A0[1]
	mul	$a0			# a[3]*a[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	 mov	$ai,%rax
	adc	%rdx,$A0[1]
	mov	$A0[0],-8($tptr)	# t[3]

	xor	$A1[0],$A1[0]
	add	$A0[1],$A1[1]
	adc	\$0,$A1[0]
	mul	$a1			# a[3]*a[1]
	add	%rax,$A1[1]
	 mov	-16($aptr),%rax		# a[2]
	adc	%rdx,$A1[0]

	mov	$A1[1],($tptr)		# t[4]
	mov	$A1[0],8($tptr)		# t[5]

	mul	$ai			# a[2]*a[3]
___
{
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);
$code.=<<___;
	 add	\$16,$i
	 xor	$shift,$shift
	 sub	$num,$i			# $i=16-$num
	 xor	$carry,$carry

	add	$A1[0],%rax		# t[5]
	adc	\$0,%rdx
	mov	%rax,8($tptr)		# t[5]
	mov	%rdx,16($tptr)		# t[6]
	mov	$carry,24($tptr)	# t[7]
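	##############################################################
	# Shift-and-add pass: each two-limb step below computes
	# t[2*i..2*i+1]<<1, carrying the bit shifted out ("shift") into
	# the next pair: lea (shift,reg,2) doubles a limb and adds the
	# incoming bit, shr by 63 recovers the outgoing bit, and the
	# neg/sbb pair saves and restores CF so that the 128-bit
	# a[i]*a[i] product can be folded in with a plain adc chain.
	#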

	 mov	-16($aptr,$i),%rax	# a[0]
	lea	64(%rsp,$num,2),$tptr
	 xor	$A0[0],$A0[0]		# t[0]
	 mov	-24($tptr,$i,2),$A0[1]	# t[1]

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	 mov	-16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	-8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr,$i,2)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	 mov	$S[1],-24($tptr,$i,2)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	 mov	0($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr,$i,2)
	adc	%rdx,$S[3]
	lea	16($i),$i
	mov	$S[3],-40($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	jmp	.Lsqr4x_shift_n_add

.align	16
.Lsqr4x_shift_n_add:
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	 mov	-16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	-8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr,$i,2)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	 mov	$S[1],-24($tptr,$i,2)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	 mov	0($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr,$i,2)
	adc	%rdx,$S[3]

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	 mov	$S[3],-8($tptr,$i,2)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	 mov	16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	24($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	 mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],0($tptr,$i,2)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	 mov	$S[1],8($tptr,$i,2)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	 mov	32($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	40($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	 mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],16($tptr,$i,2)
	adc	%rdx,$S[3]
	mov	$S[3],24($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	add	\$32,$i
	jnz	.Lsqr4x_shift_n_add

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	 mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	 mov	-8($aptr),%rax		# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1|shift
	 mov	$S[1],-24($tptr)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	adc	%rax,$S[2]
	adc	%rdx,$S[3]
	mov	$S[2],-16($tptr)
	mov	$S[3],-8($tptr)
___
}
##############################################################
# Montgomery reduction part, "word-by-word" algorithm.
#
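# In the 16-bit-limb notation of the mont_mul_ref sketch above, the
# reduction of the 2*num-limb square t[] proceeds as (illustrative,
# not executed):
#
#	for my $i (0..$num-1) {
#		my $m = ($t[$i]*$n0) & $mask;		# m0=t[i]*n0
#		my $c = 0;
#		for my $j (0..$num-1) {			# t[i+j] += n[j]*m
#			my $v = $t[$i+$j] + $np->[$j]*$m + $c;
#			($t[$i+$j],$c) = ($v & $mask, $v >> 16);
#		}
#		# propagate $c upwards, folding the final carry into $topbit
#	}
#
# leaving the result in t[num..2*num-1] ahead of the conditional
# subtraction below; the assembly interleaves two such chains (m0/m1)
# and modulo-schedules the loads marked "modsched #".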
{
my ($topbit,$nptr)=("%rbp",$aptr);
my ($m0,$m1)=($a0,$a1);
my @Ni=("%rbx","%r9");
$code.=<<___;
	mov	40(%rsp),$nptr		# restore $nptr
	mov	48(%rsp),$n0		# restore *n0
	xor	$j,$j
	mov	$num,0(%rsp)		# save $num
	sub	$num,$j			# $j=-$num
	 mov	64(%rsp),$A0[0]		# t[0]		# modsched #
	 mov	$n0,$m0			#		# modsched #
	lea	64(%rsp,$num,2),%rax	# end of t[] buffer
	lea	64(%rsp,$num),$tptr	# end of t[] window
	mov	%rax,8(%rsp)		# save end of t[] buffer
	lea	($nptr,$num),$nptr	# end of n[] buffer
	xor	$topbit,$topbit		# $topbit=0

	mov	0($nptr,$j),%rax	# n[0]		# modsched #
	mov	8($nptr,$j),$Ni[1]	# n[1]		# modsched #
	 imulq	$A0[0],$m0		# m0=t[0]*n0	# modsched #
	 mov	%rax,$Ni[0]		#		# modsched #
	jmp	.Lsqr4x_mont_outer

.align	16
.Lsqr4x_mont_outer:
	xor	$A0[1],$A0[1]
	mul	$m0			# n[0]*m0
	add	%rax,$A0[0]		# n[0]*m0+t[0]
	 mov	$Ni[1],%rax
	adc	%rdx,$A0[1]
	mov	$n0,$m1

	xor	$A0[0],$A0[0]
	add	8($tptr,$j),$A0[1]
	adc	\$0,$A0[0]
	mul	$m0			# n[1]*m0
	add	%rax,$A0[1]		# n[1]*m0+t[1]
	 mov	$Ni[0],%rax
	adc	%rdx,$A0[0]

	imulq	$A0[1],$m1

	mov	16($nptr,$j),$Ni[0]	# n[2]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[0]*m1
	add	%rax,$A1[0]		# n[0]*m1+"t[1]"
	 mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],8($tptr,$j)	# "t[1]"

	xor	$A0[1],$A0[1]
	add	16($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[2]*m0
	add	%rax,$A0[0]		# n[2]*m0+t[2]
	 mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	24($nptr,$j),$Ni[1]	# n[3]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[1]*m1
	add	%rax,$A1[1]		# n[1]*m1+"t[2]"
	 mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],16($tptr,$j)	# "t[2]"

	xor	$A0[0],$A0[0]
	add	24($tptr,$j),$A0[1]
	lea	32($j),$j
	adc	\$0,$A0[0]
	mul	$m0			# n[3]*m0
	add	%rax,$A0[1]		# n[3]*m0+t[3]
	 mov	$Ni[0],%rax
	adc	%rdx,$A0[0]
	jmp	.Lsqr4x_mont_inner

.align	16
.Lsqr4x_mont_inner:
	mov	($nptr,$j),$Ni[0]	# n[4]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[2]*m1
	add	%rax,$A1[0]		# n[2]*m1+"t[3]"
	 mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],-8($tptr,$j)	# "t[3]"

	xor	$A0[1],$A0[1]
	add	($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[4]*m0
	add	%rax,$A0[0]		# n[4]*m0+t[4]
	 mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	8($nptr,$j),$Ni[1]	# n[5]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[3]*m1
	add	%rax,$A1[1]		# n[3]*m1+"t[4]"
	 mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],($tptr,$j)	# "t[4]"

	xor	$A0[0],$A0[0]
	add	8($tptr,$j),$A0[1]
	adc	\$0,$A0[0]
	mul	$m0			# n[5]*m0
	add	%rax,$A0[1]		# n[5]*m0+t[5]
	 mov	$Ni[0],%rax
	adc	%rdx,$A0[0]


	mov	16($nptr,$j),$Ni[0]	# n[6]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[4]*m1
	add	%rax,$A1[0]		# n[4]*m1+"t[5]"
	 mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],8($tptr,$j)	# "t[5]"

	xor	$A0[1],$A0[1]
	add	16($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[6]*m0
	add	%rax,$A0[0]		# n[6]*m0+t[6]
	 mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	24($nptr,$j),$Ni[1]	# n[7]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[5]*m1
	add	%rax,$A1[1]		# n[5]*m1+"t[6]"
	 mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],16($tptr,$j)	# "t[6]"

	xor	$A0[0],$A0[0]
	add	24($tptr,$j),$A0[1]
	lea	32($j),$j
	adc	\$0,$A0[0]
	mul	$m0			# n[7]*m0
	add	%rax,$A0[1]		# n[7]*m0+t[7]
	 mov	$Ni[0],%rax
	adc	%rdx,$A0[0]
	cmp	\$0,$j
	jne	.Lsqr4x_mont_inner

	 sub	0(%rsp),$j		# $j=-$num	# modsched #
	 mov	$n0,$m0			#		# modsched #

	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[6]*m1
	add	%rax,$A1[0]		# n[6]*m1+"t[7]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],-8($tptr)	# "t[7]"

	xor	$A0[1],$A0[1]
	add	($tptr),$A0[0]		# +t[8]
	adc	\$0,$A0[1]
	 mov	0($nptr,$j),$Ni[0]	# n[0]		# modsched #
	add	$topbit,$A0[0]
	adc	\$0,$A0[1]

	 imulq	16($tptr,$j),$m0	# m0=t[0]*n0	# modsched #
	xor	$A1[0],$A1[0]
	 mov	8($nptr,$j),$Ni[1]	# n[1]		# modsched #
	add	$A0[0],$A1[1]
	 mov	16($tptr,$j),$A0[0]	# t[0]		# modsched #
	adc	\$0,$A1[0]
	mul	$m1			# n[7]*m1
	add	%rax,$A1[1]		# n[7]*m1+"t[8]"
	 mov	$Ni[0],%rax		#		# modsched #
	adc	%rdx,$A1[0]
	mov	$A1[1],($tptr)		# "t[8]"

	xor	$topbit,$topbit
	add	8($tptr),$A1[0]		# +t[9]
	adc	$topbit,$topbit
	add	$A0[1],$A1[0]
	lea	16($tptr),$tptr		# "t[$num]>>128"
	adc	\$0,$topbit
	mov	$A1[0],-8($tptr)	# "t[9]"
	cmp	8(%rsp),$tptr		# are we done?
	jb	.Lsqr4x_mont_outer

	mov	0(%rsp),$num		# restore $num
	mov	$topbit,($tptr)		# save $topbit
___
}
##############################################################
# Post-condition, 4x unrolled copy from bn_mul_mont
#
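# The borrow out of the final subtraction is smeared into an all-ones
# or all-zeroes mask ("sbb \$0,@ri[0]" below) and the source pointer
# is then chosen branchlessly with and/not/or -- in effect
#
#	$src = $borrow ? $tp : $rp;
#
# after which the copy loop also zaps t[] so that no intermediate
# values survive on the stack.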
{
my ($tptr,$nptr)=("%rbx",$aptr);
my @ri=("%rax","%rdx","%r10","%r11");
$code.=<<___;
	mov	64(%rsp,$num),@ri[0]	# tp[0]
	lea	64(%rsp,$num),$tptr	# upper half of t[2*$num] holds result
	mov	40(%rsp),$nptr		# restore $nptr
	shr	\$5,$num		# num/4
	mov	8($tptr),@ri[1]		# t[1]
	xor	$i,$i			# i=0 and clear CF!

	mov	32(%rsp),$rptr		# restore $rptr
	sub	0($nptr),@ri[0]
	mov	16($tptr),@ri[2]	# t[2]
	mov	24($tptr),@ri[3]	# t[3]
	sbb	8($nptr),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsqr4x_sub
.align	16
.Lsqr4x_sub:
	mov	@ri[0],0($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($nptr,$i,8),@ri[2]
	mov	32($tptr,$i,8),@ri[0]	# tp[i+1]
	mov	40($tptr,$i,8),@ri[1]
	sbb	24($nptr,$i,8),@ri[3]
	mov	@ri[2],16($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($nptr,$i,8),@ri[0]
	mov	48($tptr,$i,8),@ri[2]
	mov	56($tptr,$i,8),@ri[3]
	sbb	40($nptr,$i,8),@ri[1]
	lea	4($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsqr4x_sub

	mov	@ri[0],0($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($tptr,$i,8),@ri[0]	# load overflow bit
	sbb	16($nptr,$i,8),@ri[2]
	mov	@ri[1],8($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($nptr,$i,8),@ri[3]
	mov	@ri[2],16($rptr,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	and	@ri[0],$tptr
	not	@ri[0]
	mov	$rptr,$nptr
	and	@ri[0],$nptr
	lea	-1($num),$j
	or	$nptr,$tptr		# tp=borrow?tp:rp

	pxor	%xmm0,%xmm0
	lea	64(%rsp,$num,8),$nptr
	movdqu	($tptr),%xmm1
	lea	($nptr,$num,8),$nptr
	movdqa	%xmm0,64(%rsp)		# zap lower half of temporary vector
	movdqa	%xmm0,($nptr)		# zap upper half of temporary vector
	movdqu	%xmm1,($rptr)
	jmp	.Lsqr4x_copy
.align	16
.Lsqr4x_copy:				# copy or in-place refresh
	movdqu	16($tptr,$i),%xmm2
	movdqu	32($tptr,$i),%xmm1
	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,96(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
	movdqa	%xmm0,32($nptr,$i)	# zap upper half of temporary vector
	movdqu	%xmm2,16($rptr,$i)
	movdqu	%xmm1,32($rptr,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lsqr4x_copy

	movdqu	16($tptr,$i),%xmm2
	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
	movdqu	%xmm2,16($rptr,$i)
___
}
$code.=<<___;
	mov	56(%rsp),%rsi		# restore %rsp
	mov	\$1,%rax
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lsqr4x_epilogue:
	ret
.size	bn_sqr4x_mont,.-bn_sqr4x_mont
___
}}}
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lsqr4x_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lsqr4x_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lsqr4x_epilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lsqr4x_epilogue
	jae	.Lcommon_seh_tail

	mov	56(%rax),%rax		# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr4x_mont
	.rva	.LSEH_end_bn_sqr4x_mont
	.rva	.LSEH_info_bn_sqr4x_mont

.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
___
}

print $code;
close STDOUT;