author    kleink <kleink@pkgsrc.org>  2003-01-29 22:05:33 +0000
committer kleink <kleink@pkgsrc.org>  2003-01-29 22:05:33 +0000
commit    51c13fecd3d2c831ac524d06bef8cfc80eaaa65b (patch)
tree      0946f6a083793c0bb1ee22146f930c1d1066ef32 /databases/openldap/patches
parent    31a4c0b070852e17d2cc3d3356c2f5f2a762bb75 (diff)
download  pkgsrc-51c13fecd3d2c831ac524d06bef8cfc80eaaa65b.tar.gz
As usual, prevent compiling SHA1 from blowing up on sparc64 by moving
each round into a separate function. Bump to 2.0.27nb2.
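
For readers skimming the diff below: the patch leaves the generic unrolled code alone and, under #ifdef __sparc_v9__, routes the 80 unrolled SHA-1 operations through four small helper functions (do_R01, do_R2, do_R3, do_R4) that take the working variables a..e by pointer, so the compiler never has to optimize one enormous function body. A minimal standalone sketch of the same restructuring follows; it is not the patched sha1.c, the names ROUND, do_rounds_a, do_rounds_b and toy_transform are made up for illustration, and the toy round does not compute real SHA-1.

/*
 * Illustrative sketch only -- not the patched sha1.c.  Round
 * operations are grouped into small helper functions that receive
 * the working variables by pointer, mirroring the patch's nR0..nR4
 * wrappers and do_R* helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define rol(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

/* One toy round step on pointed-to working variables. */
#define ROUND(v, w, x, y, z, k) \
	do { *(z) += (*(w) ^ *(x) ^ *(y)) + (k) + rol(*(v), 5); \
	     *(w) = rol(*(w), 30); } while (0)

/* Each group of unrolled steps lives in its own function, so the
 * compiler only ever optimizes a handful of steps at a time. */
static void
do_rounds_a(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d, uint32_t *e)
{
	ROUND(a, b, c, d, e, 0x5A827999);
	ROUND(e, a, b, c, d, 0x5A827999);
	ROUND(d, e, a, b, c, 0x5A827999);
	/* ...remaining steps of this group... */
}

static void
do_rounds_b(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d, uint32_t *e)
{
	ROUND(a, b, c, d, e, 0x6ED9EBA1);
	ROUND(e, a, b, c, d, 0x6ED9EBA1);
	/* ... */
}

/* The transform shrinks to a few calls, just as the patched
 * lutil_SHA1Transform() does under #ifdef __sparc_v9__. */
static void
toy_transform(uint32_t state[5])
{
	uint32_t a = state[0], b = state[1], c = state[2],
	    d = state[3], e = state[4];

	do_rounds_a(&a, &b, &c, &d, &e);
	do_rounds_b(&a, &b, &c, &d, &e);

	state[0] += a; state[1] += b; state[2] += c;
	state[3] += d; state[4] += e;
}

int
main(void)
{
	uint32_t st[5] = { 0x67452301, 0xEFCDAB89, 0x98BADCFE,
			   0x10325476, 0xC3D2E1F0 };

	toy_transform(st);
	printf("%08x %08x\n", (unsigned)st[0], (unsigned)st[1]);
	return 0;
}

Splitting the work this way bounds how many unrolled steps the optimizer sees per function, which is what sidesteps the sparc64 build failure the commit message refers to.
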
Diffstat (limited to 'databases/openldap/patches')
-rw-r--r--  databases/openldap/patches/patch-ad  125
1 file changed, 125 insertions, 0 deletions
diff --git a/databases/openldap/patches/patch-ad b/databases/openldap/patches/patch-ad
new file mode 100644
index 00000000000..b11c779fdc3
--- /dev/null
+++ b/databases/openldap/patches/patch-ad
@@ -0,0 +1,125 @@
+$NetBSD: patch-ad,v 1.1 2003/01/29 22:05:34 kleink Exp $
+
+--- libraries/liblutil/sha1.c.orig 2002-01-04 21:38:25.000000000 +0100
++++ libraries/liblutil/sha1.c 2003-01-29 22:39:16.000000000 +0100
+@@ -41,13 +41,13 @@
+ * I got the idea of expanding during the round function from SSLeay
+ */
+ #if BYTE_ORDER == LITTLE_ENDIAN
+-# define blk0(i) (block[i] = (rol(block[i],24)&0xFF00FF00) \
+- |(rol(block[i],8)&0x00FF00FF))
++# define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
++ |(rol(block->l[i],8)&0x00FF00FF))
+ #else
+-# define blk0(i) block[i]
++# define blk0(i) block->l[i]
+ #endif
+-#define blk(i) (block[i&15] = rol(block[(i+13)&15]^block[(i+8)&15] \
+- ^block[(i+2)&15]^block[i&15],1))
++#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
++ ^block->l[(i+2)&15]^block->l[i&15],1))
+
+ /*
+ * (R0+R1), R2, R3, R4 are the different operations (rounds) used in SHA1
+@@ -58,6 +58,63 @@
+ #define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
+ #define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
+
++typedef union {
++ u_char c[64];
++ uint32 l[16];
++} CHAR64LONG16;
++
++#ifdef __sparc_v9__
++void do_R01(uint32 *a, uint32 *b, uint32 *c, uint32 *d, uint32 *e, CHAR64LONG16 *);
++void do_R2(uint32 *a, uint32 *b, uint32 *c, uint32 *d, uint32 *e, CHAR64LONG16 *);
++void do_R3(uint32 *a, uint32 *b, uint32 *c, uint32 *d, uint32 *e, CHAR64LONG16 *);
++void do_R4(uint32 *a, uint32 *b, uint32 *c, uint32 *d, uint32 *e, CHAR64LONG16 *);
++
++#define nR0(v,w,x,y,z,i) R0(*v,*w,*x,*y,*z,i)
++#define nR1(v,w,x,y,z,i) R1(*v,*w,*x,*y,*z,i)
++#define nR2(v,w,x,y,z,i) R2(*v,*w,*x,*y,*z,i)
++#define nR3(v,w,x,y,z,i) R3(*v,*w,*x,*y,*z,i)
++#define nR4(v,w,x,y,z,i) R4(*v,*w,*x,*y,*z,i)
++
++void
++do_R01(uint32 *a, uint32 *b, uint32 *c, uint32 *d, uint32 *e, CHAR64LONG16 *block)
++{
++ nR0(a,b,c,d,e, 0); nR0(e,a,b,c,d, 1); nR0(d,e,a,b,c, 2); nR0(c,d,e,a,b, 3);
++ nR0(b,c,d,e,a, 4); nR0(a,b,c,d,e, 5); nR0(e,a,b,c,d, 6); nR0(d,e,a,b,c, 7);
++ nR0(c,d,e,a,b, 8); nR0(b,c,d,e,a, 9); nR0(a,b,c,d,e,10); nR0(e,a,b,c,d,11);
++ nR0(d,e,a,b,c,12); nR0(c,d,e,a,b,13); nR0(b,c,d,e,a,14); nR0(a,b,c,d,e,15);
++ nR1(e,a,b,c,d,16); nR1(d,e,a,b,c,17); nR1(c,d,e,a,b,18); nR1(b,c,d,e,a,19);
++}
++
++void
++do_R2(uint32 *a, uint32 *b, uint32 *c, uint32 *d, uint32 *e, CHAR64LONG16 *block)
++{
++ nR2(a,b,c,d,e,20); nR2(e,a,b,c,d,21); nR2(d,e,a,b,c,22); nR2(c,d,e,a,b,23);
++ nR2(b,c,d,e,a,24); nR2(a,b,c,d,e,25); nR2(e,a,b,c,d,26); nR2(d,e,a,b,c,27);
++ nR2(c,d,e,a,b,28); nR2(b,c,d,e,a,29); nR2(a,b,c,d,e,30); nR2(e,a,b,c,d,31);
++ nR2(d,e,a,b,c,32); nR2(c,d,e,a,b,33); nR2(b,c,d,e,a,34); nR2(a,b,c,d,e,35);
++ nR2(e,a,b,c,d,36); nR2(d,e,a,b,c,37); nR2(c,d,e,a,b,38); nR2(b,c,d,e,a,39);
++}
++
++void
++do_R3(uint32 *a, uint32 *b, uint32 *c, uint32 *d, uint32 *e, CHAR64LONG16 *block)
++{
++ nR3(a,b,c,d,e,40); nR3(e,a,b,c,d,41); nR3(d,e,a,b,c,42); nR3(c,d,e,a,b,43);
++ nR3(b,c,d,e,a,44); nR3(a,b,c,d,e,45); nR3(e,a,b,c,d,46); nR3(d,e,a,b,c,47);
++ nR3(c,d,e,a,b,48); nR3(b,c,d,e,a,49); nR3(a,b,c,d,e,50); nR3(e,a,b,c,d,51);
++ nR3(d,e,a,b,c,52); nR3(c,d,e,a,b,53); nR3(b,c,d,e,a,54); nR3(a,b,c,d,e,55);
++ nR3(e,a,b,c,d,56); nR3(d,e,a,b,c,57); nR3(c,d,e,a,b,58); nR3(b,c,d,e,a,59);
++}
++
++void
++do_R4(uint32 *a, uint32 *b, uint32 *c, uint32 *d, uint32 *e, CHAR64LONG16 *block)
++{
++ nR4(a,b,c,d,e,60); nR4(e,a,b,c,d,61); nR4(d,e,a,b,c,62); nR4(c,d,e,a,b,63);
++ nR4(b,c,d,e,a,64); nR4(a,b,c,d,e,65); nR4(e,a,b,c,d,66); nR4(d,e,a,b,c,67);
++ nR4(c,d,e,a,b,68); nR4(b,c,d,e,a,69); nR4(a,b,c,d,e,70); nR4(e,a,b,c,d,71);
++ nR4(d,e,a,b,c,72); nR4(c,d,e,a,b,73); nR4(b,c,d,e,a,74); nR4(a,b,c,d,e,75);
++ nR4(e,a,b,c,d,76); nR4(d,e,a,b,c,77); nR4(c,d,e,a,b,78); nR4(b,c,d,e,a,79);
++}
++#endif
+
+ /*
+ * Hash a single 512-bit block. This is the core of the algorithm.
+@@ -66,12 +123,14 @@
+ lutil_SHA1Transform( uint32 *state, const unsigned char *buffer )
+ {
+ uint32 a, b, c, d, e;
++ CHAR64LONG16 *block;
+
+ #ifdef SHA1HANDSOFF
+- uint32 block[16];
++ CHAR64LONG16 workspace;
++ block = &workspace;
+ (void)AC_MEMCPY(block, buffer, 64);
+ #else
+- uint32 *block = (u_int32 *) buffer;
++ block = (CHAR64LONG16 *) (void *) buffer;
+ #endif
+
+ /* Copy context->state[] to working vars */
+@@ -81,6 +140,12 @@
+ d = state[3];
+ e = state[4];
+
++#ifdef __sparc_v9__
++ do_R01(&a, &b, &c, &d, &e, block);
++ do_R2(&a, &b, &c, &d, &e, block);
++ do_R3(&a, &b, &c, &d, &e, block);
++ do_R4(&a, &b, &c, &d, &e, block);
++#else
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
+ R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
+@@ -102,6 +167,7 @@
+ R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
+ R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
+ R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
++#endif
+
+ /* Add the working vars back into context.state[] */
+ state[0] += a;