Crypto++ 8.7
Free C++ class library of cryptographic schemes
gcm_simd.cpp
// gcm_simd.cpp - written and placed in the public domain by
//                Jeffrey Walton, Uri Blumenthal and Marcel Raad.
//                Original x86 CLMUL by Wei Dai. ARM and POWER8
//                PMULL and VMULL by JW, UB and MR.
//
// This source file uses intrinsics to gain access to x86 CLMUL,
// ARMv8a PMULL and POWER8 VMULL instructions. A separate source
// file is needed because additional CXXFLAGS are required to enable
// the appropriate instruction sets in some build configurations.
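//
// A sketch of the extra flags involved (the exact values are an
// assumption based on typical GNU-style builds, not taken from this
// file): this translation unit might be compiled with -mssse3 -mpclmul
// on x86/x64, with -march=armv8-a+crypto on ARMv8, or with
// -mcpu=power8 on POWER8 to enable the intrinsics used below.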

#include "pch.h"
#include "config.h"
#include "misc.h"

#if defined(CRYPTOPP_DISABLE_GCM_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
# include <xmmintrin.h>
#endif

#if (CRYPTOPP_CLMUL_AVAILABLE)
# include <tmmintrin.h>
# include <wmmintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_HEADER)
# include <stdint.h>
# include <arm_neon.h>
#endif

#if defined(CRYPTOPP_ARM_PMULL_AVAILABLE)
# include "arm_simd.h"
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "ppc_simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Squash MS LNK4221 and libtool warnings
extern const char GCM_SIMD_FNAME[] = __FILE__;

NAMESPACE_BEGIN(CryptoPP)

// ************************* Feature Probes ************************* //

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
extern "C" {
    typedef void (*SigHandler)(int);

    static jmp_buf s_jmpSIGILL;
    static void SigIllHandler(int)
    {
        longjmp(s_jmpSIGILL, 1);
    }
}
#endif  // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY

#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
                                       vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else

    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
                                       vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif  // CRYPTOPP_ARM_PMULL_AVAILABLE
}
#endif  // ARM32 or ARM64
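
// A minimal usage sketch (the caller below is hypothetical; in the
// library the probe result is typically consulted by the CPU feature
// detection code rather than called here):
//
//   static const bool s_hasPMULL = CPU_ProbePMULL();
//   if (s_hasPMULL)
//       { /* safe to use the PMULL routines below */ }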

// *************************** ARM NEON *************************** //

#if CRYPTOPP_ARM_NEON_AVAILABLE
void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
{
    vst1q_u8(a, veorq_u8(vld1q_u8(b), vld1q_u8(c)));
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE

#if CRYPTOPP_ARM_PMULL_AVAILABLE

// Swaps high and low 64-bit words
inline uint64x2_t SwapWords(const uint64x2_t& data)
{
    return (uint64x2_t)vcombine_u64(
        vget_high_u64(data), vget_low_u64(data));
}

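// Reduces c0 * x^128 + c1 * x^64 + c2 modulo the GCM polynomial.
// This mirrors GCM_Reduce_CLMUL below; see the commentary there for
// the algebra. r packs the reduction constants 0xe100000000000000
// and 0xc200000000000000.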
uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
{
    c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
    c1 = veorq_u64(c1, PMULL_01(c0, r));
    c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
    c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
    c0 = PMULL_00(c0, r);
    c2 = veorq_u64(c2, c0);
    c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
    c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
    c2 = vshlq_n_u64(c2, 1);

    return veorq_u64(c2, c1);
}

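// Schoolbook 128x128 -> 256-bit carry-less multiply: c0 is the low
// product x_lo*h_lo, c1 collects the cross terms x_hi*h_lo ^ x_lo*h_hi,
// and c2 is the high product x_hi*h_hi. The triple is then reduced
// modulo the GCM polynomial.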
uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
{
    const uint64x2_t c0 = PMULL_00(x, h);
    const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
    const uint64x2_t c2 = PMULL_11(x, h);

    return GCM_Reduce_PMULL(c0, c1, c2, r);
}

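// Builds the table of powers of the hash key (h, h^2, h^3, ...) used
// by GCM_AuthenticateBlocks_PMULL. Judging from the stores below, each
// 32-byte entry packs the low 64-bit halves of two consecutive powers
// into its first 16 bytes and the high halves into its second 16 bytes,
// so the hash loop can fetch both with aligned 16-byte loads.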
void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
    const uint64x2_t h0 = vextq_u64(t, t, 1);

    uint64x2_t h = h0;
    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
        vst1_u64(UINT64_CAST(mulTable+i), vget_low_u64(h));
        vst1q_u64(UINT64_CAST(mulTable+i+16), h1);
        vst1q_u64(UINT64_CAST(mulTable+i+8), h);
        vst1_u64(UINT64_CAST(mulTable+i+8), vget_low_u64(h1));
        h = GCM_Multiply_PMULL(h1, h0, r);
    }

    const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
    vst1_u64(UINT64_CAST(mulTable+i), vget_low_u64(h));
    vst1q_u64(UINT64_CAST(mulTable+i+16), h1);
    vst1q_u64(UINT64_CAST(mulTable+i+8), h);
    vst1_u64(UINT64_CAST(mulTable+i+8), vget_low_u64(h1));
}

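// Hashes up to eight 16-byte blocks per pass. The partial products
// c0/c1/c2 are accumulated across the whole batch and a single
// GCM_Reduce_PMULL is performed per batch, amortizing the cost of
// the modular reduction.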
size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16U, 8U);
        uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
        uint64x2_t c0 = vdupq_n_u64(0);
        uint64x2_t c1 = vdupq_n_u64(0);
        uint64x2_t c2 = vdupq_n_u64(0);

        while (true)
        {
            const uint64x2_t h0 = vld1q_u64(CONST_UINT64_CAST(mtable+(i+0)*16));
            const uint64x2_t h1 = vld1q_u64(CONST_UINT64_CAST(mtable+(i+1)*16));
            const uint64x2_t h2 = veorq_u64(h0, h1);

            if (++i == s)
            {
                const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
                c0 = veorq_u64(c0, PMULL_00(d1, h0));
                c2 = veorq_u64(c2, PMULL_10(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_00(d1, h2));

                break;
            }

            d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            c0 = veorq_u64(c0, PMULL_10(d2, h0));
            c2 = veorq_u64(c2, PMULL_10(d1, h1));
            d2 = veorq_u64(d2, d1);
            c1 = veorq_u64(c1, PMULL_10(d2, h2));

            if (++i == s)
            {
                const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
                c0 = veorq_u64(c0, PMULL_01(d1, h0));
                c2 = veorq_u64(c2, PMULL_11(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_01(d1, h2));

                break;
            }

            const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            d2 = vextq_u64(t3, t3, 1);
            c0 = veorq_u64(c0, PMULL_01(d1, h0));
            c2 = veorq_u64(c2, PMULL_01(d2, h1));
            d1 = veorq_u64(d1, d2);
            c1 = veorq_u64(c1, PMULL_01(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = veorq_u64(veorq_u64(c1, c0), c2);
        x = GCM_Reduce_PMULL(c0, c1, c2, r);
    }

    vst1q_u64(UINT64_CAST(hbuffer), x);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
{
    if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
    {
        const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
        vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
    }
}
#endif  // CRYPTOPP_ARM_PMULL_AVAILABLE

// ***************************** SSE ***************************** //

#if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
// SunCC 5.10-5.11 compiler crash. Move GCM_Xor16_SSE2 out-of-line, and place in
// a source file with a SSE architecture switch. Also see GH #226 and GH #284.
void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
{
# if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
    asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
         : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
# else  // CRYPTOPP_SSE2_INTRIN_AVAILABLE
    _mm_store_si128(M128_CAST(a), _mm_xor_si128(
        _mm_load_si128(CONST_M128_CAST(b)),
        _mm_load_si128(CONST_M128_CAST(c))));
# endif
}
#endif  // CRYPTOPP_SSE2_ASM_AVAILABLE

#if CRYPTOPP_CLMUL_AVAILABLE

#if 0
// Reference implementations preserved for testing. gcm_gf_mult is the
// bit-serial GHASH multiply from NIST SP 800-38D; the second function
// emulates the PCLMULQDQ instruction using PolynomialMod2.
void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
{
    word64 Z0=0, Z1=0, V0, V1;

    typedef BlockGetAndPut<word64, BigEndian> Block;
    Block::Get(a)(V0)(V1);

    for (int i=0; i<16; i++)
    {
        for (int j=0x80; j!=0; j>>=1)
        {
            int x = b[i] & j;
            Z0 ^= x ? V0 : 0;
            Z1 ^= x ? V1 : 0;
            x = (int)V1 & 1;
            V1 = (V1>>1) | (V0<<63);
            V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
        }
    }
    Block::Put(NULLPTR, c)(Z0)(Z1);
}

__m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
{
    word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
    word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};

    PolynomialMod2 pa((byte *)A, 8);
    PolynomialMod2 pb((byte *)B, 8);
    PolynomialMod2 c = pa*pb;

    __m128i output;
    for (int i=0; i<16; i++)
        ((byte *)&output)[i] = c.GetByte(i);
    return output;
}
#endif  // Testing

// Swaps high and low 64-bit words
inline __m128i SwapWords(const __m128i& val)
{
    return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
}

// SunCC 5.11-5.15 compiler crash. Make the function inline
// and parameters non-const. Also see GH #188 and GH #224.
inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
{
    /*
    The polynomial to be reduced is c0 * x^128 + c1 * x^64 + c2. c0t below
    refers to the most significant half of c0 as a polynomial, which, due to
    GCM's bit reflection, is in the rightmost bit positions and the lowest
    byte addresses.

    c1 ^= c0t * 0xc200000000000000
    c2t ^= c0t
    t = shift (c1t ^ c0b) left 1 bit
    c2 ^= t * 0xe100000000000000
    c2t ^= c1b
    shift c2 left 1 bit and xor in lowest bit of c1t
    */
    c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
    c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
    c0 = _mm_slli_epi64(c0, 1);
    c0 = _mm_clmulepi64_si128(c0, r, 0);
    c2 = _mm_xor_si128(c2, c0);
    c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
    c1 = _mm_unpacklo_epi64(c1, c2);
    c1 = _mm_srli_epi64(c1, 63);
    c2 = _mm_slli_epi64(c2, 1);
    return _mm_xor_si128(c2, c1);
}

// SunCC 5.13-5.14 compiler crash. Don't make the function inline.
// This is in contrast to GCM_Reduce_CLMUL, which must be inline.
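// Schoolbook multiply, as in the PMULL version above. For
// _mm_clmulepi64_si128, imm 0x00 selects x_lo*h_lo, 0x01 selects
// x_hi*h_lo, 0x10 selects x_lo*h_hi, and 0x11 selects x_hi*h_hi.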
__m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
{
    const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
    const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
    const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);

    return GCM_Reduce_CLMUL(c0, c1, c2, r);
}

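// Same table construction and interleaved layout as
// GCM_SetKeyWithoutResync_PMULL above, with _mm_storel_epi64 providing
// the 8-byte stores of the low halves.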
void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;

    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
        _mm_storel_epi64(M128_CAST(mulTable+i), h);
        _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
        _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
        _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
        h = GCM_Multiply_CLMUL(h1, h0, r);
    }

    const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
    _mm_storel_epi64(M128_CAST(mulTable+i), h);
    _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
    _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
    _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
}

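// Same batching scheme as GCM_AuthenticateBlocks_PMULL above: up to
// eight blocks per pass, one reduction per batch. m1 byte-reverses a
// whole 16-byte block; m2 byte-reverses each 8-byte half in place.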
size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
    __m128i x = _mm_load_si128(M128_CAST(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
        __m128i d2 = _mm_shuffle_epi8(d1, m2);
        __m128i c0 = _mm_setzero_si128();
        __m128i c1 = _mm_setzero_si128();
        __m128i c2 = _mm_setzero_si128();

        while (true)
        {
            const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
            const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
            const __m128i h2 = _mm_xor_si128(h0, h1);

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
                break;
            }

            d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
            d2 = _mm_xor_si128(d2, d1);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
                break;
            }

            d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
            d1 = _mm_xor_si128(d1, d2);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
        }
        data += s*16;
        len -= s*16;

        c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
        x = GCM_Reduce_CLMUL(c0, c1, c2, r);
    }

    _mm_store_si128(M128_CAST(hbuffer), x);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
{
    // SSSE3 instruction, but only used with CLMUL
    const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
        _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
}
#endif  // CRYPTOPP_CLMUL_AVAILABLE

// ***************************** POWER8 ***************************** //

#if CRYPTOPP_POWER8_AVAILABLE
void GCM_Xor16_POWER8(byte *a, const byte *b, const byte *c)
{
    VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
}
#endif  // CRYPTOPP_POWER8_AVAILABLE

#if CRYPTOPP_POWER8_VMULL_AVAILABLE

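// Same reduction as GCM_Reduce_CLMUL above, expressed with POWER8
// vector intrinsics; see the commentary there for the algebra.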
uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
{
    const uint64x2_p m1 = {1,1}, m63 = {63,63};

    c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
    c1 = VecXor(c1, VecIntelMultiply10(c0, r));
    c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
    c0 = VecIntelMultiply00(vec_sl(c0, m1), r);
    c2 = VecXor(c2, c0);
    c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
    c1 = vec_sr(vec_mergeh(c1, c2), m63);
    c2 = vec_sl(c2, m1);

    return VecXor(c2, c1);
}

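// Schoolbook multiply, as in the CLMUL and PMULL versions above:
// low product, cross terms, and high product feed the reduction.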
inline uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
{
    const uint64x2_p c0 = VecIntelMultiply00(x, h);
    const uint64x2_p c1 = VecXor(VecIntelMultiply01(x, h), VecIntelMultiply10(x, h));
    const uint64x2_p c2 = VecIntelMultiply11(x, h);

    return GCM_Reduce_VMULL(c0, c1, c2, r);
}

inline uint64x2_p LoadHashKey(const byte *hashKey)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
    const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
    return VecPermute(key, key, mask);
#else
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
    return VecPermute(key, key, mask);
#endif
}

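// Same interleaved table layout as the PMULL and CLMUL versions above.
// The 64-bit low halves are staged through a 16-byte temporary and
// copied out with memcpy, apparently because an 8-byte vector store
// is not available here.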
void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p h = LoadHashKey(hashKey), h0 = h;

    unsigned int i;
    uint64_t temp[2];

    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
        VecStore(h, (byte*)temp);
        std::memcpy(mulTable+i, temp+0, 8);
        VecStore(h1, mulTable+i+16);
        VecStore(h, mulTable+i+8);
        VecStore(h1, (byte*)temp);
        std::memcpy(mulTable+i+8, temp+0, 8);
        h = GCM_Multiply_VMULL(h1, h0, r);
    }

    const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
    VecStore(h, (byte*)temp);
    std::memcpy(mulTable+i, temp+0, 8);
    VecStore(h1, mulTable+i+16);
    VecStore(h, mulTable+i+8);
    VecStore(h1, (byte*)temp);
    std::memcpy(mulTable+i+8, temp+0, 8);
}

// Swaps high and low 64-bit words
template <class T>
inline T SwapWords(const T& data)
{
    return (T)VecRotateLeftOctet<8>(data);
}

inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)VecLoad(dataBuffer);
#else
    const uint64x2_p data = (uint64x2_p)VecLoad(dataBuffer);
    const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
    return VecPermute(data, data, mask);
#endif
}

inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)SwapWords(VecLoadBE(dataBuffer));
#else
    return (uint64x2_p)VecLoadBE(dataBuffer);
#endif
}

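// Same batching scheme as the CLMUL and PMULL versions above: up to
// eight blocks per pass with a single reduction per batch. LoadBuffer1
// and LoadBuffer2 supply the two endian arrangements the loop consumes.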
size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p x = (uint64x2_p)VecLoad(hbuffer);

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
        uint64x2_p c0 = {0}, c1 = {0}, c2 = {0};

        while (true)
        {
            const uint64x2_p h0 = (uint64x2_p)VecLoad(mtable+(i+0)*16);
            const uint64x2_p h1 = (uint64x2_p)VecLoad(mtable+(i+1)*16);
            const uint64x2_p h2 = (uint64x2_p)VecXor(h0, h1);

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VecIntelMultiply00(d1, h0));
                c2 = VecXor(c2, VecIntelMultiply01(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VecIntelMultiply00(d1, h2));
                break;
            }

            d1 = LoadBuffer1(data+(s-i)*16-8);
            c0 = VecXor(c0, VecIntelMultiply01(d2, h0));
            c2 = VecXor(c2, VecIntelMultiply01(d1, h1));
            d2 = VecXor(d2, d1);
            c1 = VecXor(c1, VecIntelMultiply01(d2, h2));

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VecIntelMultiply10(d1, h0));
                c2 = VecXor(c2, VecIntelMultiply11(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VecIntelMultiply10(d1, h2));
                break;
            }

            d2 = LoadBuffer2(data+(s-i)*16-8);
            c0 = VecXor(c0, VecIntelMultiply10(d1, h0));
            c2 = VecXor(c2, VecIntelMultiply10(d2, h1));
            d1 = VecXor(d1, d2);
            c1 = VecXor(c1, VecIntelMultiply10(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = VecXor(VecXor(c1, c0), c2);
        x = GCM_Reduce_VMULL(c0, c1, c2, r);
    }

    VecStore(x, hbuffer);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
{
    const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
    VecStore(VecPermute(VecLoad(hashBuffer), mask), hashBuffer);
}
#endif  // CRYPTOPP_POWER8_VMULL_AVAILABLE

NAMESPACE_END