// Crypto++
// misc.h
1 #ifndef CRYPTOPP_MISC_H
2 #define CRYPTOPP_MISC_H
3 
4 #include "cryptlib.h"
5 #include "smartptr.h"
6 #include <string.h> // for memcpy and memmove
7 
8 #ifdef _MSC_VER
9  #if _MSC_VER >= 1400
10  // VC2005 workaround: disable declarations that conflict with winnt.h
11  #define _interlockedbittestandset CRYPTOPP_DISABLED_INTRINSIC_1
12  #define _interlockedbittestandreset CRYPTOPP_DISABLED_INTRINSIC_2
13  #define _interlockedbittestandset64 CRYPTOPP_DISABLED_INTRINSIC_3
14  #define _interlockedbittestandreset64 CRYPTOPP_DISABLED_INTRINSIC_4
15  #include <intrin.h>
16  #undef _interlockedbittestandset
17  #undef _interlockedbittestandreset
18  #undef _interlockedbittestandset64
19  #undef _interlockedbittestandreset64
20  #define CRYPTOPP_FAST_ROTATE(x) 1
21  #elif _MSC_VER >= 1300
22  #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32 | (x) == 64)
23  #else
24  #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
25  #endif
26 #elif (defined(__MWERKS__) && TARGET_CPU_PPC) || \
27  (defined(__GNUC__) && (defined(_ARCH_PWR2) || defined(_ARCH_PWR) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_ARCH_COM)))
28  #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
29 #elif defined(__GNUC__) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86) // depend on GCC's peephole optimization to generate rotate instructions
30  #define CRYPTOPP_FAST_ROTATE(x) 1
31 #else
32  #define CRYPTOPP_FAST_ROTATE(x) 0
33 #endif
34 
35 #ifdef __BORLANDC__
36 #include <mem.h>
37 #endif
38 
39 #if defined(__GNUC__) && defined(__linux__)
40 #define CRYPTOPP_BYTESWAP_AVAILABLE
41 #include <byteswap.h>
42 #endif
43 
44 NAMESPACE_BEGIN(CryptoPP)
45 
46 // ************** compile-time assertion ***************
47 
// Compile-time assertion: instantiating CompileAssert<false> declares an
// array of size -1, which fails to compile; CompileAssert<true> is harmless.
template <bool b>
struct CompileAssert
{
    static char dummy[2*b-1];
};
53 
54 #define CRYPTOPP_COMPILE_ASSERT(assertion) CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, __LINE__)
55 #if defined(CRYPTOPP_EXPORTS) || defined(CRYPTOPP_IMPORTS)
56 #define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance)
57 #else
58 #define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance) static CompileAssert<(assertion)> CRYPTOPP_ASSERT_JOIN(cryptopp_assert_, instance)
59 #endif
60 #define CRYPTOPP_ASSERT_JOIN(X, Y) CRYPTOPP_DO_ASSERT_JOIN(X, Y)
61 #define CRYPTOPP_DO_ASSERT_JOIN(X, Y) X##Y
62 
63 // ************** misc classes ***************
64 
//! An empty class, used as a placeholder/do-nothing base type.
class CRYPTOPP_DLL Empty
{
};
68 
//! Convenience mixin: inherit from two base classes at once.
template <class BASE1, class BASE2>
class CRYPTOPP_NO_VTABLE TwoBases : public BASE1, public BASE2
{
};
74 
//! Convenience mixin: inherit from three base classes at once.
template <class BASE1, class BASE2, class BASE3>
class CRYPTOPP_NO_VTABLE ThreeBases : public BASE1, public BASE2, public BASE3
{
};
80 
// Holds a T by value for derived classes (e.g. to control base-class
// construction order). Access is through the protected member.
template <class T>
class ObjectHolder
{
protected:
    T m_object;
};
87 
89 {
90 public:
91  NotCopyable() {}
92 private:
93  NotCopyable(const NotCopyable &);
94  void operator=(const NotCopyable &);
95 };
96 
//! Default object factory for Singleton: allocates a T with operator new.
template <class T>
struct NewObject
{
    T* operator()() const {return new T;}
};
102 
103 /*! This function safely initializes a static object in a multithreaded environment without using locks (for portability).
104  Note that if two threads call Ref() at the same time, they may get back different references, and one object
105  may end up being memory leaked. This is by design.
106 */
107 template <class T, class F = NewObject<T>, int instance=0>
109 {
110 public:
111  Singleton(F objectFactory = F()) : m_objectFactory(objectFactory) {}
112 
113  // prevent this function from being inlined
114  CRYPTOPP_NOINLINE const T & Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const;
115 
116 private:
117  F m_objectFactory;
118 };
119 
// Double-checked initialization without a lock. The pointer is published
// through a volatile simple_ptr; if two threads race, a second object may be
// constructed and either deleted below or leaked (documented above).
// NOTE(review): publication relies on volatile, not a memory fence — confirm
// adequacy on weak-memory targets.
template <class T, class F, int instance>
const T & Singleton<T, F, instance>::Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const
{
    static volatile simple_ptr<T> s_pObject;
    T *p = s_pObject.m_p;   // first check: already constructed?

    if (p)
        return *p;

    T *newObject = m_objectFactory();   // construct a candidate
    p = s_pObject.m_p;      // second check: did another thread win?

    if (p)
    {
        delete newObject;   // lost the race; discard our copy
        return *p;
    }

    s_pObject.m_p = newObject;  // publish
    return *newObject;
}
141 
142 // ************** misc functions ***************
143 
144 #if (!__STDC_WANT_SECURE_LIB__ && !defined(_MEMORY_S_DEFINED))
145 inline void memcpy_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
146 {
147  if (count > sizeInBytes)
148  throw InvalidArgument("memcpy_s: buffer overflow");
149  memcpy(dest, src, count);
150 }
151 
152 inline void memmove_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
153 {
154  if (count > sizeInBytes)
155  throw InvalidArgument("memmove_s: buffer overflow");
156  memmove(dest, src, count);
157 }
158 
159 #if __BORLANDC__ >= 0x620
160 // C++Builder 2010 workaround: can't use std::memcpy_s because it doesn't allow 0 lengths
161 #define memcpy_s CryptoPP::memcpy_s
162 #define memmove_s CryptoPP::memmove_s
163 #endif
164 #endif
165 
// memset wrapper that returns ptr; the compile-time zero-length special case
// only exists to silence a spurious GCC warning.
inline void * memset_z(void *ptr, int value, size_t num)
{
// avoid extranous warning on GCC 4.3.2 Ubuntu 8.10
#if CRYPTOPP_GCC_VERSION >= 30001
    if (__builtin_constant_p(num) && num==0)
        return ptr;
#endif
    return memset(ptr, value, num);
}
175 
// can't use std::min or std::max in MSVC60 or Cygwin 1.1.0
// Returns the smaller of a and b; ties return a (same as std::min).
template <class T> inline const T& STDMIN(const T& a, const T& b)
{
    if (b < a)
        return b;
    return a;
}
181 
// Minimum of two non-negative values of possibly different-width types,
// returned as T1. The compile-time assert requires that the branch taken
// below cannot truncate: either T1 fits in T2 (and T2 is unsigned) or
// T2 fits in T1 (and T1 is unsigned).
template <class T1, class T2> inline const T1 UnsignedMin(const T1& a, const T2& b)
{
    CRYPTOPP_COMPILE_ASSERT((sizeof(T1)<=sizeof(T2) && T2(-1)>0) || (sizeof(T1)>sizeof(T2) && T1(-1)>0));
    assert(a==0 || a>0);    // GCC workaround: get rid of the warning "comparison is always true due to limited range of data type"
    assert(b>=0);

    if (sizeof(T1)<=sizeof(T2))
        return b < (T2)a ? (T1)b : a;
    else
        return (T1)b < a ? (T1)b : a;
}
193 
// Returns the larger of a and b; ties return a (same as std::max).
template <class T> inline const T& STDMAX(const T& a, const T& b)
{
    if (a < b)
        return b;
    return a;
}
198 
199 #define RETURN_IF_NONZERO(x) size_t returnedValue = x; if (returnedValue) return returnedValue
200 
201 // this version of the macro is fastest on Pentium 3 and Pentium 4 with MSVC 6 SP5 w/ Processor Pack
202 #define GETBYTE(x, y) (unsigned int)byte((x)>>(8*(y)))
203 // these may be faster on other CPUs/compilers
204 // #define GETBYTE(x, y) (unsigned int)(((x)>>(8*(y)))&255)
205 // #define GETBYTE(x, y) (((byte *)&(x))[y])
206 
207 #define CRYPTOPP_GET_BYTE_AS_BYTE(x, y) byte((x)>>(8*(y)))
208 
// Parity (XOR of all bits) of value: XOR-fold the word down to one bit.
template <class T>
unsigned int Parity(T value)
{
    unsigned int shift = 8*sizeof(value)/2;
    while (shift > 0)
    {
        value ^= value >> shift;
        shift /= 2;
    }
    return (unsigned int)value & 1;
}
216 
// Smallest number of bytes needed to represent value (0 for 0), found by
// binary-searching for the highest non-zero byte.
template <class T>
unsigned int BytePrecision(const T &value)
{
    if (!value)
        return 0;

    unsigned int lo = 0, hi = 8*sizeof(value);
    while (hi - lo > 8)
    {
        const unsigned int mid = (lo + hi) / 2;
        if (value >> mid)
            lo = mid;
        else
            hi = mid;
    }
    return hi / 8;
}
236 
// Smallest number of bits needed to represent value (0 for 0), found by
// binary-searching for the highest set bit.
template <class T>
unsigned int BitPrecision(const T &value)
{
    if (!value)
        return 0;

    unsigned int lo = 0, hi = 8*sizeof(value);
    while (hi - lo > 1)
    {
        const unsigned int mid = (lo + hi) / 2;
        if (value >> mid)
            lo = mid;
        else
            hi = mid;
    }
    return hi;
}
256 
// Index of the lowest set bit of v. Result is undefined/meaningless for v == 0.
inline unsigned int TrailingZeros(word32 v)
{
#if defined(__GNUC__) && CRYPTOPP_GCC_VERSION >= 30400
    return __builtin_ctz(v);
#elif defined(_MSC_VER) && _MSC_VER >= 1400
    unsigned long result;
    _BitScanForward(&result, v);
    return result;
#else
    // de Bruijn multiply-and-lookup fallback
    // from http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightMultLookup
    static const int MultiplyDeBruijnBitPosition[32] =
    {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };
    return MultiplyDeBruijnBitPosition[((word32)((v & -v) * 0x077CB531U)) >> 27];
#endif
}
275 
// 64-bit trailing-zero count; falls back to two 32-bit halves when no
// 64-bit intrinsic is available. Undefined for v == 0.
inline unsigned int TrailingZeros(word64 v)
{
#if defined(__GNUC__) && CRYPTOPP_GCC_VERSION >= 30400
    return __builtin_ctzll(v);
#elif defined(_MSC_VER) && _MSC_VER >= 1400 && (defined(_M_X64) || defined(_M_IA64))
    unsigned long result;
    _BitScanForward64(&result, v);
    return result;
#else
    return word32(v) ? TrailingZeros(word32(v)) : 32 + TrailingZeros(word32(v>>32));
#endif
}
288 
// Keep only the low `size` bits of value; a size covering the whole word
// (or more) is a no-op.
template <class T>
inline T Crop(T value, size_t size)
{
    if (size >= 8*sizeof(value))
        return value;
    return T(value & ((T(1) << size) - 1));
}
297 
// Cast `from` into `to`, then verify nothing was lost: the value must
// round-trip and keep its sign (catches negative -> unsigned wraparound).
template <class T1, class T2>
inline bool SafeConvert(T1 from, T2 &to)
{
    to = (T2)from;
    return from == to && (from > 0) == (to > 0);
}
306 
// ---- size conversions; all divide rounding up ----

inline size_t BitsToBytes(size_t bitCount)
{
    // ceiling(bitCount / 8)
    return ((bitCount+7)/(8));
}

inline size_t BytesToWords(size_t byteCount)
{
    // ceiling(byteCount / WORD_SIZE)
    return ((byteCount+WORD_SIZE-1)/WORD_SIZE);
}

inline size_t BitsToWords(size_t bitCount)
{
    // ceiling(bitCount / WORD_BITS)
    return ((bitCount+WORD_BITS-1)/(WORD_BITS));
}

inline size_t BitsToDwords(size_t bitCount)
{
    // ceiling(bitCount / (2*WORD_BITS))
    return ((bitCount+2*WORD_BITS-1)/(2*WORD_BITS));
}
326 
327 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *buf, const byte *mask, size_t count);
328 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *output, const byte *input, const byte *mask, size_t count);
329 
330 CRYPTOPP_DLL bool CRYPTOPP_API VerifyBufsEqual(const byte *buf1, const byte *buf2, size_t count);
331 
// True iff n is a positive power of two (exactly one set bit).
template <class T>
inline bool IsPowerOf2(const T &n)
{
    if (n <= 0)
        return false;
    return (n & (n-1)) == 0;
}
337 
// a mod b where b must be a power of two: reduce by masking the low bits.
template <class T1, class T2>
inline T2 ModPowerOf2(const T1 &a, const T2 &b)
{
    assert(IsPowerOf2(b));
    const T2 mask = b - 1;
    return T2(a) & mask;
}
344 
// Largest multiple of m that is <= n; uses the cheap mask path when m is a
// power of two, a general remainder otherwise.
template <class T1, class T2>
inline T1 RoundDownToMultipleOf(const T1 &n, const T2 &m)
{
    return IsPowerOf2(m) ? n - ModPowerOf2(n, m) : n - n%m;
}
353 
// Smallest multiple of m that is >= n; throws when n+m-1 wraps around.
template <class T1, class T2>
inline T1 RoundUpToMultipleOf(const T1 &n, const T2 &m)
{
    if (n+m-1 < n)  // overflow check
        throw InvalidArgument("RoundUpToMultipleOf: integer overflow");
    return RoundDownToMultipleOf(n+m-1, m);
}
361 
// Alignment requirement of T in bytes, using the best facility available:
// __alignof (MSVC), __alignof__ (GCC), otherwise a sizeof-based guess.
// When unaligned access is permitted, small types report alignment 1.
template <class T>
inline unsigned int GetAlignmentOf(T *dummy=NULL)   // VC60 workaround
{
#ifdef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
    if (sizeof(T) < 16)
        return 1;
#endif

#if (_MSC_VER >= 1300)
    return __alignof(T);
#elif defined(__GNUC__)
    return __alignof__(T);
#elif CRYPTOPP_BOOL_SLOW_WORD64
    return UnsignedMin(4U, sizeof(T));
#else
    return sizeof(T);   // fallback guess
#endif
}
380 
381 inline bool IsAlignedOn(const void *p, unsigned int alignment)
382 {
383  return alignment==1 || (IsPowerOf2(alignment) ? ModPowerOf2((size_t)p, alignment) == 0 : (size_t)p % alignment == 0);
384 }
385 
// True if p satisfies T's alignment requirement (dummy: VC60 workaround).
template <class T>
inline bool IsAligned(const void *p, T *dummy=NULL) // VC60 workaround
{
    return IsAlignedOn(p, GetAlignmentOf<T>());
}
391 
392 #ifdef IS_LITTLE_ENDIAN
394 #else
395  typedef BigEndian NativeByteOrder;
396 #endif
397 
// Byte order of the machine this code was compiled for.
inline ByteOrder GetNativeByteOrder()
{
    return NativeByteOrder::ToEnum();
}

// True if `order` matches the native byte order.
inline bool NativeByteOrderIs(ByteOrder order)
{
    return order == GetNativeByteOrder();
}
407 
// Render `a` in the given base (default 10) using digits 0-9 then a-z,
// with a leading '-' for negative values. Returns "0" for zero.
template <class T>
std::string IntToString(T a, unsigned int base = 10)
{
    if (a == 0)
        return "0";

    bool negative = false;
    if (a < 0)
    {
        negative = true;
        a = 0-a;    // avoid unary minus (VC .NET quirk)
    }

    std::string digits;
    while (a > 0)
    {
        T d = a % base;
        digits.insert(digits.begin(), char(d + (d < 10 ? '0' : 'a' - 10)));
        a /= base;
    }
    if (negative)
        digits.insert(digits.begin(), '-');
    return digits;
}
430 
// a - b clamped at zero (never wraps below zero), returned as T1.
template <class T1, class T2>
inline T1 SaturatingSubtract(const T1 &a, const T2 &b)
{
    if (a > b)
        return T1(a - b);
    return T1(0);
}
436 
// Map an object's IsForwardTransformation() to the CipherDir enum.
template <class T>
inline CipherDir GetCipherDir(const T &obj)
{
    return obj.IsForwardTransformation() ? ENCRYPTION : DECRYPTION;
}
442 
443 CRYPTOPP_DLL void CRYPTOPP_API CallNewHandler();
444 
// Treat inout[0..s-1] as a big-endian counter and add one in place,
// propagating the carry toward the most significant byte.
inline void IncrementCounterByOne(byte *inout, unsigned int s)
{
    for (int i=s-1, carry=1; i>=0 && carry; i--)
        carry = !++inout[i];
}
450 
// Big-endian increment of input into output. Writes input[i]+1 from the
// least significant byte while the carry propagates; once the carry stops,
// the untouched high-order bytes [0, i] are copied over unchanged.
inline void IncrementCounterByOne(byte *output, const byte *input, unsigned int s)
{
    int i, carry;
    for (i=s-1, carry=1; i>=0 && carry; i--)
        carry = ((output[i] = input[i]+1) == 0);
    memcpy_s(output, s, input, i+1);
}
458 
// Branch-free conditional swap: mask is (a^b) when c is true, 0 when false,
// so XORing it into both operands swaps them only when c holds.
template <class T>
inline void ConditionalSwap(bool c, T &a, T &b)
{
    const T mask = c * (a ^ b);
    a ^= mask;
    b ^= mask;
}
466 
// Branch-free conditional swap for pointers via pointer difference.
// NOTE(review): a and b must point into the same array/object, otherwise
// the subtraction is undefined — callers must guarantee this.
template <class T>
inline void ConditionalSwapPointers(bool c, T &a, T &b)
{
    ptrdiff_t t = c * (a - b);
    a -= t;
    b += t;
}
474 
475 // see http://www.dwheeler.com/secure-programs/Secure-Programs-HOWTO/protect-secrets.html
476 // and https://www.securecoding.cert.org/confluence/display/cplusplus/MSC06-CPP.+Be+aware+of+compiler+optimization+when+dealing+with+sensitive+data
// Zero n elements through a volatile pointer so the compiler cannot
// optimize the stores away (see the links above).
template <class T>
void SecureWipeBuffer(T *buf, size_t n)
{
    // GCC 4.3.2 on Cygwin optimizes away the first store if this loop is done in the forward direction
    volatile T *p = buf+n;
    while (n--)
        *(--p) = 0;
}
485 
#if (_MSC_VER >= 1400 || defined(__GNUC__)) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)

// x86/x64 fast paths: wipe with "rep stos*" (GCC inline asm) or the
// corresponding __stos* intrinsics (MSVC) so the stores cannot be elided.
template<> inline void SecureWipeBuffer(byte *buf, size_t n)
{
    volatile byte *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosb" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosb((byte *)(size_t)p, 0, n);
#endif
}

template<> inline void SecureWipeBuffer(word16 *buf, size_t n)
{
    volatile word16 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosw" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosw((word16 *)(size_t)p, 0, n);
#endif
}

template<> inline void SecureWipeBuffer(word32 *buf, size_t n)
{
    volatile word32 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosl" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosd((unsigned long *)(size_t)p, 0, n);
#endif
}

template<> inline void SecureWipeBuffer(word64 *buf, size_t n)
{
#if CRYPTOPP_BOOL_X64
    volatile word64 *p = buf;
#ifdef __GNUC__
    asm volatile("rep stosq" : "+c"(n), "+D"(p) : "a"(0) : "memory");
#else
    __stosq((word64 *)(size_t)p, 0, n);
#endif
#else
    // no 64-bit string store on 32-bit x86: wipe as 2*n 32-bit words
    SecureWipeBuffer((word32 *)buf, 2*n);
#endif
}

#endif  // #if (_MSC_VER >= 1400 || defined(__GNUC__)) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86)
533 
// Wipe an array of T, dispatching to the widest elementary wipe that the
// element size and alignment permit (64- > 32- > 16-bit words > bytes).
template <class T>
inline void SecureWipeArray(T *buf, size_t n)
{
    if (sizeof(T) % 8 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word64>() == 0)
        SecureWipeBuffer((word64 *)buf, n * (sizeof(T)/8));
    else if (sizeof(T) % 4 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word32>() == 0)
        SecureWipeBuffer((word32 *)buf, n * (sizeof(T)/4));
    else if (sizeof(T) % 2 == 0 && GetAlignmentOf<T>() % GetAlignmentOf<word16>() == 0)
        SecureWipeBuffer((word16 *)buf, n * (sizeof(T)/2));
    else
        SecureWipeBuffer((byte *)buf, n * sizeof(T));
}
546 
// Convert a wide string to a narrow (multibyte) string with wcstombs(),
// which depends on the current C locale (setlocale() must have been called).
// Throws InvalidArgument on conversion failure unless throwOnError is false,
// in which case an empty string is returned.
static std::string StringNarrow(const wchar_t *str, bool throwOnError = true)
{
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4996) // 'wcstombs': This function or variable may be unsafe.
#endif
    size_t size = wcstombs(NULL, str, 0);   // first pass: measure only
    if (size == size_t(0)-1)                // (size_t)-1 signals failure
    {
        if (throwOnError)
            throw InvalidArgument("StringNarrow: wcstombs() call failed");
        else
            return std::string();
    }
    std::string result(size, 0);
    wcstombs(&result[0], str, size);        // second pass: convert in place
    return result;
#ifdef _MSC_VER
#pragma warning(pop)
#endif
}
569 
570 #if CRYPTOPP_BOOL_ALIGN16_ENABLED
571 CRYPTOPP_DLL void * CRYPTOPP_API AlignedAllocate(size_t size);
572 CRYPTOPP_DLL void CRYPTOPP_API AlignedDeallocate(void *p);
573 #endif
574 
575 CRYPTOPP_DLL void * CRYPTOPP_API UnalignedAllocate(size_t size);
576 CRYPTOPP_DLL void CRYPTOPP_API UnalignedDeallocate(void *p);
577 
578 // ************** rotate functions ***************
579 
// Left-rotate x by y bits (y < bit width). y == 0 is handled separately
// because a shift by the full word width would be undefined.
template <class T> inline T rotlFixed(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    if (y == 0)
        return x;
    return T((x << y) | (x >> (sizeof(T)*8 - y)));
}
585 
// Right-rotate x by y bits (y < bit width). y == 0 is handled separately
// because a shift by the full word width would be undefined.
template <class T> inline T rotrFixed(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    if (y == 0)
        return x;
    return T((x >> y) | (x << (sizeof(T)*8 - y)));
}
591 
// Left-rotate x by a runtime count y; caller must keep y in (0, bit width)
// since a zero count would shift by the full width (undefined).
template <class T> inline T rotlVariable(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    const unsigned int w = sizeof(T)*8;
    return T((x << y) | (x >> (w - y)));
}
597 
// Right-rotate x by a runtime count y; caller must keep y in (0, bit width)
// since a zero count would shift by the full width (undefined).
template <class T> inline T rotrVariable(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    const unsigned int w = sizeof(T)*8;
    return T((x >> y) | (x << (w - y)));
}
603 
// Left-rotate with the count reduced modulo the bit width.
template <class T> inline T rotlMod(T x, unsigned int y)
{
    const unsigned int w = sizeof(T)*8;
    y %= w;
    return T((x << y) | (x >> (w - y)));
}
609 
// Right-rotate with the count reduced modulo the bit width.
template <class T> inline T rotrMod(T x, unsigned int y)
{
    const unsigned int w = sizeof(T)*8;
    y %= w;
    return T((x >> y) | (x << (w - y)));
}
615 
#ifdef _MSC_VER

// MSVC: map the word32 rotates onto the _lrotl/_lrotr intrinsics.
// The Fixed forms keep the y == 0 special case of the generic versions.
template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _lrotl(x, y) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _lrotr(x, y) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _lrotl(x, y);
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _lrotr(x, y);
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
    return _lrotl(x, y);
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
    return _lrotr(x, y);
}

#endif // #ifdef _MSC_VER
653 
#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// Intel C++ Compiler 10.0 calls a function instead of using the rotate instruction when using these instructions

// MSVC >= 1300: 64-bit rotates via the _rotl64/_rotr64 intrinsics.
template<> inline word64 rotlFixed<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl64(x, y) : x;
}

template<> inline word64 rotrFixed<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr64(x, y) : x;
}

template<> inline word64 rotlVariable<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl64(x, y);
}

template<> inline word64 rotrVariable<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr64(x, y);
}

template<> inline word64 rotlMod<word64>(word64 x, unsigned int y)
{
    return _rotl64(x, y);
}

template<> inline word64 rotrMod<word64>(word64 x, unsigned int y)
{
    return _rotr64(x, y);
}

#endif // #if _MSC_VER >= 1300
692 
#if _MSC_VER >= 1400 && !defined(__INTEL_COMPILER)
// Intel C++ Compiler 10.0 gives undefined externals with these

// MSVC >= 1400 (VC2005): 16-bit and 8-bit rotates via the
// _rotl16/_rotr16 and _rotl8/_rotr8 intrinsics.
template<> inline word16 rotlFixed<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl16(x, y) : x;
}

template<> inline word16 rotrFixed<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr16(x, y) : x;
}

template<> inline word16 rotlVariable<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl16(x, y);
}

template<> inline word16 rotrVariable<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr16(x, y);
}

template<> inline word16 rotlMod<word16>(word16 x, unsigned int y)
{
    return _rotl16(x, y);
}

template<> inline word16 rotrMod<word16>(word16 x, unsigned int y)
{
    return _rotr16(x, y);
}

template<> inline byte rotlFixed<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl8(x, y) : x;
}

template<> inline byte rotrFixed<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr8(x, y) : x;
}

template<> inline byte rotlVariable<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl8(x, y);
}

template<> inline byte rotrVariable<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr8(x, y);
}

template<> inline byte rotlMod<byte>(byte x, unsigned int y)
{
    return _rotl8(x, y);
}

template<> inline byte rotrMod<byte>(byte x, unsigned int y)
{
    return _rotr8(x, y);
}

#endif // #if _MSC_VER >= 1400
765 
#if (defined(__MWERKS__) && TARGET_CPU_PPC)

// Metrowerks on PowerPC: word32 rotates via the rlwinm/rlwnm
// rotate-and-mask intrinsics (right rotate = left rotate by 32-y).
template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return y ? __rlwinm(x,y,0,31) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return y ? __rlwinm(x,32-y,0,31) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return (__rlwnm(x,32-y,0,31));
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
    return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
    return (__rlwnm(x,32-y,0,31));
}

#endif // #if (defined(__MWERKS__) && TARGET_CPU_PPC)
803 
804 // ************** endian reversal ***************
805 
// Byte `index` of `value` under the given byte order: index 0 is the least
// significant byte for little-endian, the most significant for big-endian.
template <class T>
inline unsigned int GetByte(ByteOrder order, T value, unsigned int index)
{
    if (order == LITTLE_ENDIAN_ORDER)
        return GETBYTE(value, index);
    else
        return GETBYTE(value, sizeof(T)-index-1);
}
814 
// No-op: a single byte has no byte order to reverse.
inline byte ByteReverse(byte value)
{
    return value;
}
819 
// Swap the two bytes of a 16-bit word, preferring a platform byteswap.
inline word16 ByteReverse(word16 value)
{
#ifdef CRYPTOPP_BYTESWAP_AVAILABLE
    return bswap_16(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_ushort(value);
#else
    return rotlFixed(value, 8U);    // 8-bit rotate swaps the two bytes
#endif
}
830 
// Reverse the four bytes of a 32-bit word, preferring hardware/library
// byteswaps, then rotate-based tricks as a portable fallback.
inline word32 ByteReverse(word32 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value)); // single x86 bswap
    return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_32(value);
#elif defined(__MWERKS__) && TARGET_CPU_PPC
    return (word32)__lwbrx(&value,0); // PPC load-word-byte-reversed
#elif _MSC_VER >= 1400 || (_MSC_VER >= 1300 && !defined(_DLL))
    return _byteswap_ulong(value);
#elif CRYPTOPP_FAST_ROTATE(32)
    // 5 instructions with rotate instruction, 9 without
    return (rotrFixed(value, 8U) & 0xff00ff00) | (rotlFixed(value, 8U) & 0x00ff00ff);
#else
    // 6 instructions with rotate instruction, 8 without
    value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8);
    return rotlFixed(value, 16U);
#endif
}
851 
// Reverse the eight bytes of a 64-bit word. On platforms where 64-bit
// arithmetic is slow, the value is split into two 32-bit halves.
inline word64 ByteReverse(word64 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE) && defined(__x86_64__)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value)); // single x86-64 bswap
    return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_64(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_uint64(value);
#elif CRYPTOPP_BOOL_SLOW_WORD64
    return (word64(ByteReverse(word32(value))) << 32) | ByteReverse(word32(value>>32));
#else
    // swap bytes within 16-bit groups, then 16-bit groups, then halves
    value = ((value & W64LIT(0xFF00FF00FF00FF00)) >> 8) | ((value & W64LIT(0x00FF00FF00FF00FF)) << 8);
    value = ((value & W64LIT(0xFFFF0000FFFF0000)) >> 16) | ((value & W64LIT(0x0000FFFF0000FFFF)) << 16);
    return rotlFixed(value, 32U);
#endif
}
869 
// Reverse the bit order within a byte: swap adjacent bits, then bit pairs,
// then the two nibbles (via a 4-bit rotate).
inline byte BitReverse(byte value)
{
    value = ((value & 0xAA) >> 1) | ((value & 0x55) << 1);
    value = ((value & 0xCC) >> 2) | ((value & 0x33) << 2);
    return rotlFixed(value, 4U);
}
876 
// Reverse the bit order of a 16-bit word: reverse bits within each byte,
// then swap the bytes with ByteReverse.
inline word16 BitReverse(word16 value)
{
    value = ((value & 0xAAAA) >> 1) | ((value & 0x5555) << 1);
    value = ((value & 0xCCCC) >> 2) | ((value & 0x3333) << 2);
    value = ((value & 0xF0F0) >> 4) | ((value & 0x0F0F) << 4);
    return ByteReverse(value);
}
884 
// Reverse the bit order of a 32-bit word: reverse bits within each byte,
// then swap the bytes with ByteReverse.
inline word32 BitReverse(word32 value)
{
    value = ((value & 0xAAAAAAAA) >> 1) | ((value & 0x55555555) << 1);
    value = ((value & 0xCCCCCCCC) >> 2) | ((value & 0x33333333) << 2);
    value = ((value & 0xF0F0F0F0) >> 4) | ((value & 0x0F0F0F0F) << 4);
    return ByteReverse(value);
}
892 
// Reverse the bit order of a 64-bit word; on slow-word64 platforms the
// value is split into two reversed 32-bit halves.
inline word64 BitReverse(word64 value)
{
#if CRYPTOPP_BOOL_SLOW_WORD64
    return (word64(BitReverse(word32(value))) << 32) | BitReverse(word32(value>>32));
#else
    value = ((value & W64LIT(0xAAAAAAAAAAAAAAAA)) >> 1) | ((value & W64LIT(0x5555555555555555)) << 1);
    value = ((value & W64LIT(0xCCCCCCCCCCCCCCCC)) >> 2) | ((value & W64LIT(0x3333333333333333)) << 2);
    value = ((value & W64LIT(0xF0F0F0F0F0F0F0F0)) >> 4) | ((value & W64LIT(0x0F0F0F0F0F0F0F0F)) << 4);
    return ByteReverse(value);
#endif
}
904 
// Reverse the bits of any integer type by dispatching on its size;
// the sizeof chain is resolved at compile time.
template <class T>
inline T BitReverse(T value)
{
    if (sizeof(T) == 1)
        return (T)BitReverse((byte)value);
    else if (sizeof(T) == 2)
        return (T)BitReverse((word16)value);
    else if (sizeof(T) == 4)
        return (T)BitReverse((word32)value);
    else
    {
        assert(sizeof(T) == 8);
        return (T)BitReverse((word64)value);
    }
}
920 
// Convert between native byte order and `order`: swap only when they differ.
template <class T>
inline T ConditionalByteReverse(ByteOrder order, T value)
{
    return NativeByteOrderIs(order) ? value : ByteReverse(value);
}
926 
// Byte-swap each word of in[] into out[] (in == out is allowed);
// byteCount must be a multiple of sizeof(T).
template <class T>
void ByteReverse(T *out, const T *in, size_t byteCount)
{
    assert(byteCount % sizeof(T) == 0);
    size_t count = byteCount/sizeof(T);
    for (size_t i=0; i<count; i++)
        out[i] = ByteReverse(in[i]);
}
935 
// Reorder in[] into out[] when `order` is non-native; otherwise plain copy
// (skipped entirely when in and out alias).
template <class T>
inline void ConditionalByteReverse(ByteOrder order, T *out, const T *in, size_t byteCount)
{
    if (!NativeByteOrderIs(order))
        ByteReverse(out, in, byteCount);
    else if (in != out)
        memcpy_s(out, byteCount, in, byteCount);
}
944 
// Expand a user key of inlen bytes into outlen words of type T: copy the
// bytes, zero-pad the tail, then byte-reverse (only when `order` is
// non-native) just the words the input actually covers.
template <class T>
inline void GetUserKey(ByteOrder order, T *out, size_t outlen, const byte *in, size_t inlen)
{
    const size_t U = sizeof(T);
    assert(inlen <= outlen*U);
    memcpy_s(out, outlen*U, in, inlen);
    memset_z((byte *)out+inlen, 0, outlen*U-inlen);
    ConditionalByteReverse(order, out, out, RoundUpToMultipleOf(inlen, U));
}
954 
955 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
// Byte-at-a-time readers used when unaligned word access is not permitted.
// Each overload assembles a word from block[] in the requested byte order;
// the trailing dummy pointer only selects the overload.
inline byte UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const byte *)
{
    return block[0];
}

inline word16 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word16 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ? block[1] | (block[0] << 8)
        : block[0] | (block[1] << 8);
}

inline word32 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word32 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ? word32(block[3]) | (word32(block[2]) << 8) | (word32(block[1]) << 16) | (word32(block[0]) << 24)
        : word32(block[0]) | (word32(block[1]) << 8) | (word32(block[2]) << 16) | (word32(block[3]) << 24);
}

inline word64 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word64 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ?
        (word64(block[7]) |
        (word64(block[6]) << 8) |
        (word64(block[5]) << 16) |
        (word64(block[4]) << 24) |
        (word64(block[3]) << 32) |
        (word64(block[2]) << 40) |
        (word64(block[1]) << 48) |
        (word64(block[0]) << 56))
        :
        (word64(block[0]) |
        (word64(block[1]) << 8) |
        (word64(block[2]) << 16) |
        (word64(block[3]) << 24) |
        (word64(block[4]) << 32) |
        (word64(block[5]) << 40) |
        (word64(block[6]) << 48) |
        (word64(block[7]) << 56));
}
997 
// Byte-at-a-time writers used when unaligned word access is not permitted.
// Each scatters `value` into block[] in the requested byte order; when
// xorBlock is non-NULL the output bytes are first XORed with it.
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, byte value, const byte *xorBlock)
{
    block[0] = xorBlock ? (value ^ xorBlock[0]) : value;
}

inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word16 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
        }
    }
}
1032 
// word32 writer: scatter the four bytes of `value` into block[] in the
// requested order, optionally XORing with xorBlock first.
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word32 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
        }
    }
}
1070 
// word64 writer: scatter the eight bytes of `value` into block[] in the
// requested order, optionally XORing with xorBlock first.
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word64 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
        }
    }
}
1124 #endif // #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
1125 
1126 template <class T>
1127 inline T GetWord(bool assumeAligned, ByteOrder order, const byte *block)
1128 {
1129 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
1130  if (!assumeAligned)
1131  return UnalignedGetWordNonTemplate(order, block, (T*)NULL);
1132  assert(IsAligned<T>(block));
1133 #endif
1134  return ConditionalByteReverse(order, *reinterpret_cast<const T *>(block));
1135 }
1136 
1137 template <class T>
1138 inline void GetWord(bool assumeAligned, ByteOrder order, T &result, const byte *block)
1139 {
1140  result = GetWord<T>(assumeAligned, order, block);
1141 }
1142 
1143 template <class T>
1144 inline void PutWord(bool assumeAligned, ByteOrder order, byte *block, T value, const byte *xorBlock = NULL)
1145 {
1146 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
1147  if (!assumeAligned)
1148  return UnalignedPutWordNonTemplate(order, block, value, xorBlock);
1149  assert(IsAligned<T>(block));
1150  assert(IsAligned<T>(xorBlock));
1151 #endif
1152  *reinterpret_cast<T *>(block) = ConditionalByteReverse(order, value) ^ (xorBlock ? *reinterpret_cast<const T *>(xorBlock) : 0);
1153 }
1154 
1155 template <class T, class B, bool A=false>
1157 {
1158 public:
1159  GetBlock(const void *block)
1160  : m_block((const byte *)block) {}
1161 
1162  template <class U>
1163  inline GetBlock<T, B, A> & operator()(U &x)
1164  {
1165  CRYPTOPP_COMPILE_ASSERT(sizeof(U) >= sizeof(T));
1166  x = GetWord<T>(A, B::ToEnum(), m_block);
1167  m_block += sizeof(T);
1168  return *this;
1169  }
1170 
1171 private:
1172  const byte *m_block;
1173 };
1174 
1175 template <class T, class B, bool A=false>
1177 {
1178 public:
1179  PutBlock(const void *xorBlock, void *block)
1180  : m_xorBlock((const byte *)xorBlock), m_block((byte *)block) {}
1181 
1182  template <class U>
1183  inline PutBlock<T, B, A> & operator()(U x)
1184  {
1185  PutWord(A, B::ToEnum(), m_block, (T)x, m_xorBlock);
1186  m_block += sizeof(T);
1187  if (m_xorBlock)
1188  m_xorBlock += sizeof(T);
1189  return *this;
1190  }
1191 
1192 private:
1193  const byte *m_xorBlock;
1194  byte *m_block;
1195 };
1196 
1197 template <class T, class B, bool GA=false, bool PA=false>
1199 {
1200  // function needed because of C++ grammatical ambiguity between expression-statements and declarations
1201  static inline GetBlock<T, B, GA> Get(const void *block) {return GetBlock<T, B, GA>(block);}
1202  typedef PutBlock<T, B, PA> Put;
1203 };
1204 
1205 template <class T>
1206 std::string WordToString(T value, ByteOrder order = BIG_ENDIAN_ORDER)
1207 {
1208  if (!NativeByteOrderIs(order))
1209  value = ByteReverse(value);
1210 
1211  return std::string((char *)&value, sizeof(value));
1212 }
1213 
1214 template <class T>
1215 T StringToWord(const std::string &str, ByteOrder order = BIG_ENDIAN_ORDER)
1216 {
1217  T value = 0;
1218  memcpy_s(&value, sizeof(value), str.data(), UnsignedMin(str.size(), sizeof(value)));
1219  return NativeByteOrderIs(order) ? value : ByteReverse(value);
1220 }
1221 
1222 // ************** help remove warning on g++ ***************
1223 
// Primary template; specialized on whether the shift amount is at least
// the bit width of T (the "overflow" case).
template <bool overflow> struct SafeShifter;

// Overflow case: C++ leaves shifts by >= the operand's bit width undefined
// (and g++ warns about them), so the result is pinned to zero.
template<> struct SafeShifter<true>
{
	template <class T>
	static inline T RightShift(T, unsigned int)
	{
		return 0;
	}

	template <class T>
	static inline T LeftShift(T, unsigned int)
	{
		return 0;
	}
};

// In-range case: forward to the built-in shift operators.
template<> struct SafeShifter<false>
{
	template <class T>
	static inline T RightShift(T v, unsigned int n)
	{
		return v >> n;
	}

	template <class T>
	static inline T LeftShift(T v, unsigned int n)
	{
		return v << n;
	}
};

// Right-shift that yields 0 instead of undefined behavior when
// bits >= 8*sizeof(T); the dispatch is resolved at compile time.
template <unsigned int bits, class T>
inline T SafeRightShift(T value)
{
	return SafeShifter<(bits>=(8*sizeof(T)))>::RightShift(value, bits);
}

// Left-shift counterpart of SafeRightShift.
template <unsigned int bits, class T>
inline T SafeLeftShift(T value)
{
	return SafeShifter<(bits>=(8*sizeof(T)))>::LeftShift(value, bits);
}
1267 
1268 // ************** use one buffer for multiple data members ***************
1269 
// Carve one aligned allocation (m_aggregate) into up to 8 typed sub-arrays,
// so a class needs only a single secure buffer for all of its scratch space.
// CRYPTOPP_BLOCK_i(n, t, s) declares: m_##n(), an accessor returning a t*
// into the buffer at the running byte offset (the previous block's SS value);
// SSi(), the running total size in bytes through block i; and m_##n##Size(),
// the element count s. CRYPTOPP_BLOCKS_END(i) closes the list: SST() is the
// grand total, AllocateBlocks() sizes the buffer, and m_aggregate backs it.
// Note: offsets are cumulative sums of sizeof(t)*(s); there is no padding,
// so later blocks are only as aligned as the sizes of the earlier ones allow.
#define CRYPTOPP_BLOCK_1(n, t, s) t* m_##n() {return (t *)(m_aggregate+0);} size_t SS1() {return sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_2(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS1());} size_t SS2() {return SS1()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_3(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS2());} size_t SS3() {return SS2()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_4(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS3());} size_t SS4() {return SS3()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_5(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS4());} size_t SS5() {return SS4()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_6(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS5());} size_t SS6() {return SS5()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_7(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS6());} size_t SS7() {return SS6()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCK_8(n, t, s) t* m_##n() {return (t *)(m_aggregate+SS7());} size_t SS8() {return SS7()+sizeof(t)*(s);} size_t m_##n##Size() {return (s);}
#define CRYPTOPP_BLOCKS_END(i) size_t SST() {return SS##i();} void AllocateBlocks() {m_aggregate.New(SST());} AlignedSecByteBlock m_aggregate;
1279 
1280 NAMESPACE_END
1281 
1282 #endif
exception thrown when an invalid argument is detected
Definition: cryptlib.h:145
CipherDir
used to specify a direction for a cipher to operate in (encrypt or decrypt)
Definition: cryptlib.h:93
Definition: misc.h:98
_
Definition: misc.h:71
_
Definition: misc.h:77
Definition: misc.h:65