Hasher.C
Go to the documentation of this file.
1 /*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
7 -------------------------------------------------------------------------------
8  Copyright (C) 2011-2017 OpenFOAM Foundation
9 -------------------------------------------------------------------------------
10 License
11  This file is part of OpenFOAM.
12 
13  OpenFOAM is free software: you can redistribute it and/or modify it
14  under the terms of the GNU General Public License as published by
15  the Free Software Foundation, either version 3 of the License, or
16  (at your option) any later version.
17 
18  OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
19  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21  for more details.
22 
23  You should have received a copy of the GNU General Public License
24  along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
25 
26 Description
27  Hashing functions, mostly from Bob Jenkins
28 \*---------------------------------------------------------------------------*/
29 
30 #include "Hasher.H"
31 #include "HasherInt.H"
32 #include "endian.H"
33 
// Left-rotate a 32-bit value and carry by nBits
// Note: nBits must be in the range 1..31 - a rotate by 0 (or 32) would
// shift by the full word width, which is undefined behaviour
#define bitRotateLeft(x, nBits) (((x) << (nBits)) | ((x) >> (32 - (nBits))))
36 
37 
38 // ----------------------------------------------------------------------------
39 // lookup3.c, by Bob Jenkins, May 2006, Public Domain.
40 //
41 // These are functions for producing 32-bit hashes for hash table lookup.
42 // hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
43 // are externally useful functions. Routines to test the hash are included
44 // if SELF_TEST is defined. You can use this free for any purpose. It's in
45 // the public domain. It has no warranty.
46 //
47 // You probably want to use hashlittle(). hashlittle() and hashbig()
// hash byte arrays. hashlittle() is faster than hashbig() on
49 // little-endian machines. Intel and AMD are little-endian machines.
50 // On second thought, you probably want hashlittle2(), which is identical to
51 // hashlittle() except it returns two 32-bit hashes for the price of one.
52 // You could implement hashbig2() if you wanted but I haven't bothered here.
53 //
54 // If you want to find a hash of, say, exactly 7 integers, do
55 // a = i1; b = i2; c = i3;
56 // mix(a,b,c);
57 // a += i4; b += i5; c += i6;
58 // mix(a,b,c);
59 // a += i7;
60 // final(a,b,c);
61 // then use c as the hash value. If you have a variable length array of
62 // 4-byte integers to hash, use hashword(). If you have a byte array (like
63 // a character string), use hashlittle(). If you have several byte arrays, or
64 // a mix of things, see the comments above hashlittle().
65 //
66 // Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
67 // then mix those integers. This is fast (you can do a lot more thorough
68 // mixing with 12*3 instructions on 3 integers than you can with 3 instructions
69 // on 1 byte), but shoehorning those bytes into integers efficiently is messy.
70 // ----------------------------------------------------------------------------
71 
72 // ----------------------------------------------------------------------------
73 // mix -- mix 3 32-bit values reversibly.
74 //
75 // This is reversible, so any information in (a,b,c) before mix_hash() is
76 // still in (a,b,c) after mix_hash().
77 //
78 // If four pairs of (a,b,c) inputs are run through mix_hash(), or through
79 // mix_hash() in reverse, there are at least 32 bits of the output that
80 // are sometimes the same for one pair and different for another pair.
81 // This was tested for:
82 // * pairs that differed by one bit, by two bits, in any combination
83 // of top bits of (a,b,c), or in any combination of bottom bits of
84 // (a,b,c).
85 // * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
86 // the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
87 // is commonly produced by subtraction) look like a single 1-bit
88 // difference.
89 // * the base values were pseudorandom, all zero but one bit set, or
90 // all zero plus a counter that starts at zero.
91 //
92 // Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
93 // satisfy this are
94 // 4 6 8 16 19 4
95 // 9 15 3 18 27 15
96 // 14 9 3 7 17 3
97 // Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
98 // for "differ" defined as + with a one-bit base and a two-bit delta. I
99 // used http://burtleburtle.net/bob/hash/avalanche.html to choose
100 // the operations, constants, and arrangements of the variables.
101 //
102 // This does not achieve avalanche. There are input bits of (a,b,c)
103 // that fail to affect some output bits of (a,b,c), especially of a. The
104 // most thoroughly mixed value is c, but it doesn't really even achieve
105 // avalanche in c.
106 //
107 // This allows some parallelism. Read-after-writes are good at doubling
108 // the number of bits affected, so the goal of mixing pulls in the opposite
109 // direction as the goal of parallelism. I did what I could. Rotates
110 // seem to cost as much as shifts on every machine I could lay my hands
111 // on, and rotates are much kinder to the top and bottom bits, so I used
112 // rotates.
113 // ----------------------------------------------------------------------------
114 
// Reversibly mix three 32-bit values (lookup3 mix()).
// The rotate constants (4, 6, 8, 16, 19, 4) were chosen by avalanche
// testing - see the discussion above. Arguments must be uint32_t lvalues;
// each is evaluated multiple times, so side-effecting arguments are unsafe.
#define bitMixer(a, b, c)                                                      \
    {                                                                          \
        a -= c;  a ^= bitRotateLeft(c, 4);   c += b;                           \
        b -= a;  b ^= bitRotateLeft(a, 6);   a += c;                           \
        c -= b;  c ^= bitRotateLeft(b, 8);   b += a;                           \
        a -= c;  a ^= bitRotateLeft(c,16);   c += b;                           \
        b -= a;  b ^= bitRotateLeft(a,19);   a += c;                           \
        c -= b;  c ^= bitRotateLeft(b, 4);   b += a;                           \
    }
124 
125 
126 // ----------------------------------------------------------------------------
127 // final -- final mixing of 3 32-bit values (a,b,c) into c
128 //
129 // Pairs of (a,b,c) values differing in only a few bits will usually
130 // produce values of c that look totally different. This was tested for
131 // * pairs that differed by one bit, by two bits, in any combination
132 // of top bits of (a,b,c), or in any combination of bottom bits of
133 // (a,b,c).
134 // * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
135 // the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
136 // is commonly produced by subtraction) look like a single 1-bit
137 // difference.
138 // * the base values were pseudorandom, all zero but one bit set, or
139 // all zero plus a counter that starts at zero.
140 //
141 // These constants passed:
142 // 14 11 25 16 4 14 24
143 // 12 14 25 16 4 14 24
144 // and these came close:
145 // 4 8 15 26 3 22 24
146 // 10 8 15 26 3 22 24
147 // 11 8 15 26 3 22 24
148 // ----------------------------------------------------------------------------
149 
// Final mixing of three 32-bit values (a,b,c) into c (lookup3 final()).
// Unlike bitMixer this pass is NOT reversible; it drives the remaining
// entropy of (a,b) into c so that c can be used alone as the hash value.
// Arguments must be uint32_t lvalues; each is evaluated multiple times.
#define bitMixerFinal(a, b, c)                                                 \
    {                                                                          \
        c ^= b; c -= bitRotateLeft(b, 14);                                     \
        a ^= c; a -= bitRotateLeft(c, 11);                                     \
        b ^= a; b -= bitRotateLeft(a, 25);                                     \
        c ^= b; c -= bitRotateLeft(b, 16);                                     \
        a ^= c; a -= bitRotateLeft(c, 4);                                      \
        b ^= a; b -= bitRotateLeft(a, 14);                                     \
        c ^= b; c -= bitRotateLeft(b, 24);                                     \
    }
160 
161 
162 // * * * * * * * * * * * * * * Static Functions * * * * * * * * * * * * * * //
163 
164 // ----------------------------------------------------------------------------
165 // hashlittle() -- hash a variable-length key into a 32-bit value
166 // k : the key (the unaligned variable-length array of bytes)
167 // length : the length of the key, counting by bytes
168 // initval : can be any 4-byte value
169 // Returns a 32-bit value. Every bit of the key affects every bit of
170 // the return value. Two keys differing by one or two bits will have
171 // totally different hash values.
172 //
173 // The best hash table sizes are powers of 2. There is no need to do
174 // mod a prime (mod is sooo slow!). If you need less than 32 bits,
175 // use a bitmask. For example, if you need only 10 bits, do
176 // h = (h & hashmask(10));
177 // In which case, the hash table should have hashsize(10) elements.
178 //
179 // If you are hashing n strings (uint8_t **)k, do it like this:
180 // for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
181 //
182 // By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
183 // code any way you wish, private, educational, or commercial. It's free.
184 //
185 // Use for hash table lookup, or anything where one collision in 2^^32 is
186 // acceptable. Do NOT use for cryptographic purposes.
187 // ----------------------------------------------------------------------------
188 
189 // Specialized little-endian code
190 #ifdef WM_LITTLE_ENDIAN
// Hash a variable-length key into a 32-bit value (little-endian
// specialisation of Bob Jenkins' lookup3 hashlittle()).
//   key     : start of the byte array (any alignment)
//   length  : number of bytes in the key
//   initval : a previous hash value, or an arbitrary seed
// Returns a 32-bit hash in which every key bit affects every output bit.
// The address alignment selects how the bulk of the key is consumed
// (32-bit, 16-bit or byte-wise reads); the final, possibly partial,
// 12-byte block is always gathered byte-wise into (a,b,c).
static unsigned jenkins_hashlittle
(
    const void *key,
    size_t length,
    unsigned initval
)
{
    uint32_t a, b, c;
    union { const void *ptr; size_t i; } u; // to cast key to (size_t) happily

    // Set up the internal state
    a = b = c = 0xdeadbeef + static_cast<uint32_t>(length) + initval;

    u.ptr = key;
    if ((u.i & 0x3) == 0)
    {
        // 32-bit chunks
        // NOTE(review): reads the key through uint32_t* - correctness of the
        // byte order relies on the WM_LITTLE_ENDIAN guard around this function
        const uint32_t *k = reinterpret_cast<const uint32_t*>(key);

        // all but last block: aligned reads and affect 32 bits of (a,b,c)
        while (length > 12)
        {
            a += k[0];
            b += k[1];
            c += k[2];
            bitMixer(a,b,c);
            length -= 12;
            k += 3;
        }

        // handle the last (probably partial) block byte-wise,
        // assembling little-endian words from the trailing bytes
        const uint8_t *k8 = reinterpret_cast<const uint8_t*>(k);
        switch (length)
        {
            case 12: c += k[2]; b += k[1]; a += k[0]; break;
            case 11: c += static_cast<uint32_t>(k8[10]) << 16; [[fallthrough]];
            case 10: c += static_cast<uint32_t>(k8[9]) << 8; [[fallthrough]];
            case 9 : c += k8[8]; [[fallthrough]];
            case 8 : b += k[1]; a += k[0]; break;
            case 7 : b += static_cast<uint32_t>(k8[6]) << 16; [[fallthrough]];
            case 6 : b += static_cast<uint32_t>(k8[5]) << 8; [[fallthrough]];
            case 5 : b += k8[4]; [[fallthrough]];
            case 4 : a += k[0]; break;
            case 3 : a += static_cast<uint32_t>(k8[2]) << 16; [[fallthrough]];
            case 2 : a += static_cast<uint32_t>(k8[1]) << 8; [[fallthrough]];
            case 1 : a += k8[0]; break;
            case 0 : return c; // zero-length requires no mixing
        }
    }
    else if ((u.i & 0x1) == 0)
    {
        // 16-bit chunks
        const uint16_t *k = reinterpret_cast<const uint16_t*>(key);

        // all but last block: aligned reads and different mixing
        while (length > 12)
        {
            a += k[0] + (static_cast<uint32_t>(k[1]) << 16);
            b += k[2] + (static_cast<uint32_t>(k[3]) << 16);
            c += k[4] + (static_cast<uint32_t>(k[5]) << 16);
            bitMixer(a,b,c);
            length -= 12;
            k += 6;
        }

        // handle the last (probably partial) block:
        // whole 16-bit halves via k[], trailing odd bytes via k8[]
        const uint8_t *k8 = reinterpret_cast<const uint8_t*>(k);
        switch (length)
        {
            case 12:
                c += k[4] + (static_cast<uint32_t>(k[5]) << 16);
                b += k[2] + (static_cast<uint32_t>(k[3]) << 16);
                a += k[0] + (static_cast<uint32_t>(k[1]) << 16);
                break;
            case 11:
                c += static_cast<uint32_t>(k8[10]) << 16;
                [[fallthrough]];
            case 10:
                c += k[4];
                b += k[2] + (static_cast<uint32_t>(k[3]) << 16);
                a += k[0] + (static_cast<uint32_t>(k[1]) << 16);
                break;
            case 9 :
                c += k8[8];
                [[fallthrough]];
            case 8 :
                b += k[2] + (static_cast<uint32_t>(k[3]) << 16);
                a += k[0] + (static_cast<uint32_t>(k[1]) << 16);
                break;
            case 7 :
                b += static_cast<uint32_t>(k8[6]) << 16;
                [[fallthrough]];
            case 6 :
                b += k[2];
                a += k[0] + (static_cast<uint32_t>(k[1]) << 16);
                break;
            case 5 :
                b += k8[4];
                [[fallthrough]];
            case 4 :
                a += k[0] + (static_cast<uint32_t>(k[1]) << 16);
                break;
            case 3 :
                a += static_cast<uint32_t>(k8[2]) << 16;
                [[fallthrough]];
            case 2 :
                a += k[0];
                break;
            case 1 :
                a += k8[0];
                break;
            case 0 : return c; // zero-length requires no mixing
        }
    }
    else
    {
        // unaligned: read the key one byte at a time
        const uint8_t *k = reinterpret_cast<const uint8_t*>(key);

        // all but the last block: affect some 32 bits of (a,b,c)
        while (length > 12)
        {
            a += k[0];
            a += static_cast<uint32_t>(k[1]) << 8;
            a += static_cast<uint32_t>(k[2]) << 16;
            a += static_cast<uint32_t>(k[3]) << 24;
            b += k[4];
            b += static_cast<uint32_t>(k[5]) << 8;
            b += static_cast<uint32_t>(k[6]) << 16;
            b += static_cast<uint32_t>(k[7]) << 24;
            c += k[8];
            c += static_cast<uint32_t>(k[9]) << 8;
            c += static_cast<uint32_t>(k[10]) << 16;
            c += static_cast<uint32_t>(k[11]) << 24;

            bitMixer(a,b,c);
            length -= 12;
            k += 12;
        }

        // last block: affect all 32 bits of (c)
        switch (length) // most case statements fall through
        {
            case 12: c += static_cast<uint32_t>(k[11]) << 24; [[fallthrough]];
            case 11: c += static_cast<uint32_t>(k[10]) << 16; [[fallthrough]];
            case 10: c += static_cast<uint32_t>(k[9]) << 8; [[fallthrough]];
            case 9 : c += k[8]; [[fallthrough]];

            case 8 : b += static_cast<uint32_t>(k[7]) << 24; [[fallthrough]];
            case 7 : b += static_cast<uint32_t>(k[6]) << 16; [[fallthrough]];
            case 6 : b += static_cast<uint32_t>(k[5]) << 8; [[fallthrough]];
            case 5 : b += k[4]; [[fallthrough]];

            case 4 : a += static_cast<uint32_t>(k[3]) << 24; [[fallthrough]];
            case 3 : a += static_cast<uint32_t>(k[2]) << 16; [[fallthrough]];
            case 2 : a += static_cast<uint32_t>(k[1]) << 8; [[fallthrough]];
            case 1 : a += k[0];
                break;

            case 0 : return c;
        }
    }

    bitMixerFinal(a,b,c);
    return c;
}
356 #endif
357 
358 
359 // ----------------------------------------------------------------------------
360 // hashbig():
361 // This is the same as hashword() on big-endian machines. It is different
362 // from hashlittle() on all machines. hashbig() takes advantage of
363 // big-endian byte ordering.
364 // ----------------------------------------------------------------------------
365 // specialized big-endian code
366 #ifdef WM_BIG_ENDIAN
367 static unsigned jenkins_hashbig
368 (
369  const void *key,
370  size_t length,
371  unsigned initval
372 )
373 {
374  uint32_t a, b, c;
375  union { const void *ptr; size_t i; } u; // to cast key to (size_t) happily
376 
377  // Set up the internal state
378  a = b = c = 0xdeadbeef + static_cast<uint32_t>(length) + initval;
379 
380  u.ptr = key;
381  if ((u.i & 0x3) == 0)
382  {
383  // 32-bit chunks
384  const uint32_t *k = reinterpret_cast<const uint32_t*>(key);
385 
386  // all but last block: aligned reads and affect 32 bits of (a,b,c)
387  while (length > 12)
388  {
389  a += k[0];
390  b += k[1];
391  c += k[2];
392  bitMixer(a,b,c);
393  length -= 12;
394  k += 3;
395  }
396 
397  // handle the last (probably partial) block byte-wise
398  const uint8_t *k8 = reinterpret_cast<const uint8_t*>(k);
399 
400  switch (length) // most the case statements fall through
401  {
402  case 12: c += k[2]; b += k[1]; a += k[0]; break;
403  case 11: c += static_cast<uint32_t>(k8[10]) << 8; [[fallthrough]];
404  case 10: c += static_cast<uint32_t>(k8[9]) << 16; [[fallthrough]];
405  case 9 : c += static_cast<uint32_t>(k8[8]) << 24; [[fallthrough]];
406  case 8 : b += k[1]; a += k[0]; break;
407  case 7 : b += static_cast<uint32_t>(k8[6]) << 8; [[fallthrough]];
408  case 6 : b += static_cast<uint32_t>(k8[5]) << 16; [[fallthrough]];
409  case 5 : b += static_cast<uint32_t>(k8[4]) << 24; [[fallthrough]];
410  case 4 : a += k[0]; break;
411  case 3 : a += static_cast<uint32_t>(k8[2]) << 8; [[fallthrough]];
412  case 2 : a += static_cast<uint32_t>(k8[1]) << 16; [[fallthrough]];
413  case 1 : a += static_cast<uint32_t>(k8[0]) << 24; break;
414  case 0 : return c;
415  }
416  }
417  else
418  {
419  // need to read the key one byte at a time
420  const uint8_t *k = reinterpret_cast<const uint8_t*>(key);
421 
422  // all but the last block: affect some 32 bits of (a,b,c)
423  while (length > 12)
424  {
425  a += static_cast<uint32_t>(k[0]) << 24;
426  a += static_cast<uint32_t>(k[1]) << 16;
427  a += static_cast<uint32_t>(k[2]) << 8;
428  a += static_cast<uint32_t>(k[3]);
429  b += static_cast<uint32_t>(k[4]) << 24;
430  b += static_cast<uint32_t>(k[5]) << 16;
431  b += static_cast<uint32_t>(k[6]) << 8;
432  b += static_cast<uint32_t>(k[7]);
433  c += static_cast<uint32_t>(k[8]) << 24;
434  c += static_cast<uint32_t>(k[9]) << 16;
435  c += static_cast<uint32_t>(k[10]) << 8;
436  c += static_cast<uint32_t>(k[11]);
437 
438  bitMixer(a,b,c);
439  length -= 12;
440  k += 12;
441  }
442 
443  // last block: affect all 32 bits of (c)
444  switch (length) // the case statements fall through
445  {
446  case 12: c += k[11]; [[fallthrough]];
447  case 11: c += static_cast<uint32_t>(k[10]) << 8; [[fallthrough]];
448  case 10: c += static_cast<uint32_t>(k[9]) << 16; [[fallthrough]];
449  case 9 : c += static_cast<uint32_t>(k[8]) << 24; [[fallthrough]];
450  case 8 : b += k[7]; [[fallthrough]];
451  case 7 : b += static_cast<uint32_t>(k[6]) << 8; [[fallthrough]];
452  case 6 : b += static_cast<uint32_t>(k[5]) << 16; [[fallthrough]];
453  case 5 : b += static_cast<uint32_t>(k[4]) << 24; [[fallthrough]];
454  case 4 : a += k[3]; [[fallthrough]];
455  case 3 : a += static_cast<uint32_t>(k[2]) << 8; [[fallthrough]];
456  case 2 : a += static_cast<uint32_t>(k[1]) << 16; [[fallthrough]];
457  case 1 : a += static_cast<uint32_t>(k[0]) << 24; [[fallthrough]];
458  break;
459  case 0 : return c;
460  }
461  }
462 
463  bitMixerFinal(a,b,c);
464  return c;
465 }
466 #endif
467 
468 
469 // * * * * * * * * * * * * * * * Global Functions * * * * * * * * * * * * * //
470 
471 
472 unsigned Foam::Hasher
473 (
474  const void *key,
475  size_t length,
476  unsigned initval
477 )
478 {
479 #if defined (WM_BIG_ENDIAN)
480  return jenkins_hashbig(key, length, initval);
481 #elif defined (WM_LITTLE_ENDIAN)
482  return jenkins_hashlittle(key, length, initval);
483 #else
484  #error "Cannot determine WM_BIG_ENDIAN or WM_LITTLE_ENDIAN."
485 #endif
486 }
487 
488 
489 // ----------------------------------------------------------------------------
490 // This works on all machines. To be useful, it requires
491 // -- that the key be an array of uint32_t's, and
492 // -- that the length be the number of uint32_t's in the key
493 //
494 // The function hashword() is identical to hashlittle() on little-endian
495 // machines, and identical to hashbig() on big-endian machines,
496 // except that the length has to be measured in uint32_ts rather than in
497 // bytes. hashlittle() is more complicated than hashword() only because
498 // hashlittle() has to dance around fitting the key bytes into registers.
499 // ----------------------------------------------------------------------------
500 unsigned Foam::HasherInt
501 (
502  const uint32_t *k,
503  size_t length,
504  unsigned seed
505 )
506 {
507  uint32_t a, b, c;
508 
509  // Set up the internal state
510  a = b = c = 0xdeadbeef + (static_cast<uint32_t>(length) << 2) + seed;
511 
512  // handle most of the key
513  while (length > 3)
514  {
515  a += k[0];
516  b += k[1];
517  c += k[2];
518  bitMixer(a,b,c);
519  length -= 3;
520  k += 3;
521  }
522 
523  // handle the last 3 uint32_t's
524  switch (length) // all case statements fall through
525  {
526  case 3 : c += k[2]; [[fallthrough]];
527  case 2 : b += k[1]; [[fallthrough]];
528  case 1 : a += k[0];
529  bitMixerFinal(a,b,c);
530  [[fallthrough]];
531  case 0 : // case 0: nothing left to add
532  break;
533  }
534 
535  return c;
536 }
537 
538 
539 // ----------------------------------------------------------------------------
540 // hashword2() -- same as hashword(), but take two seeds and return two
541 // 32-bit values. pc and pb must both be non-null, and *pc and *pb must
542 // both be initialized with seeds. If you pass in (*pb)==0, the output
543 // (*pc) will be the same as the return value from hashword().
544 // ----------------------------------------------------------------------------
545 unsigned Foam::HasherDual
546 (
547  const uint32_t *k,
548  size_t length,
549  unsigned& hash1, // IN: seed OUT: primary hash value
550  unsigned& hash2 // IN: more seed OUT: secondary hash value
551 )
552 {
553  uint32_t a, b, c;
554 
555  // Set up the internal state
556  a = b = c = 0xdeadbeef + (static_cast<uint32_t>(length) << 2) + hash1;
557  c += hash2;
558 
559  // handle most of the key
560  while (length > 3)
561  {
562  a += k[0];
563  b += k[1];
564  c += k[2];
565  bitMixer(a,b,c);
566  length -= 3;
567  k += 3;
568  }
569 
570  // handle the last 3 uint32_t's
571  switch (length) // all case statements fall through
572  {
573  case 3 : c += k[2]; [[fallthrough]];
574  case 2 : b += k[1]; [[fallthrough]];
575  case 1 : a += k[0];
576  bitMixerFinal(a,b,c);
577  [[fallthrough]];
578  case 0 : // case 0: nothing left to add
579  break;
580  }
581 
582  // report the result
583  hash1 = c;
584  hash2 = b;
585 
586  // return primary hash value
587  return c;
588 }
589 
590 
591 // ************************************************************************* //
HasherInt.H
Optimized hashing functions.
Foam::HasherDual
unsigned HasherDual(const uint32_t *data, size_t length, unsigned &hash1, unsigned &hash2)
An optimized version of Hasher, returning dual hash values.
Definition: Hasher.C:546
bitMixer
#define bitMixer(a, b, c)
Definition: Hasher.C:115
Foam::Hasher
unsigned Hasher(const void *data, size_t len, unsigned seed=0)
Bob Jenkins's 96-bit mixer hashing function (lookup3)
Definition: Hasher.C:473
bitMixerFinal
#define bitMixerFinal(a, b, c)
Definition: Hasher.C:150
Foam::constant::physicoChemical::b
const dimensionedScalar b
Wien displacement law constant: default SI units: [m.K].
Definition: createFields.H:27
Hasher.H
Misc. hashing functions, mostly from Bob Jenkins.
Foam::HasherInt
unsigned HasherInt(const uint32_t *data, size_t length, unsigned seed=0)
An optimized version of Hasher.
Definition: Hasher.C:501
k
label k
Boltzmann constant.
Definition: LISASMDCalcMethod2.H:41
Foam::constant::universal::c
const dimensionedScalar c
Speed of light in a vacuum.
endian.H
Help with architecture-specific aspects.