Project

General

Profile

city.patch

vmakarov (Vladimir Makarov), 04/29/2016 09:55 PM

Download (19.4 KB)

View differences:

st.c
1603 1603
		st_data_t never ATTRIBUTE_UNUSED) {
1604 1604
    return st_general_values(tab, values, size);
1605 1605
}
1606
/*
1607
 * hash_32 - 32 bit Fowler/Noll/Vo FNV-1a hash code
1608
 *
1609
 * @(#) $Hash32: Revision: 1.1 $
1610
 * @(#) $Hash32: Id: hash_32a.c,v 1.1 2003/10/03 20:38:53 chongo Exp $
1611
 * @(#) $Hash32: Source: /usr/local/src/cmd/fnv/RCS/hash_32a.c,v $
1612
 *
1613
 ***
1614
 *
1615
 * Fowler/Noll/Vo hash
1616
 *
1617
 * The basis of this hash algorithm was taken from an idea sent
1618
 * as reviewer comments to the IEEE POSIX P1003.2 committee by:
1619
 *
1620
 *      Phong Vo (http://www.research.att.com/info/kpv/)
1621
 *      Glenn Fowler (http://www.research.att.com/~gsf/)
1622
 *
1623
 * In a subsequent ballot round:
1624
 *
1625
 *      Landon Curt Noll (http://www.isthe.com/chongo/)
1626
 *
1627
 * improved on their algorithm.  Some people tried this hash
1628
 * and found that it worked rather well.  In an EMail message
1629
 * to Landon, they named it the ``Fowler/Noll/Vo'' or FNV hash.
1630
 *
1631
 * FNV hashes are designed to be fast while maintaining a low
1632
 * collision rate. The FNV speed allows one to quickly hash lots
1633
 * of data while maintaining a reasonable collision rate.  See:
1634
 *
1635
 *      http://www.isthe.com/chongo/tech/comp/fnv/index.html
1636
 *
1637
 * for more details as well as other forms of the FNV hash.
1638
 ***
1639
 *
1640
 * To use the recommended 32 bit FNV-1a hash, pass FNV1_32A_INIT as the
1641
 * Fnv32_t hashval argument to fnv_32a_buf() or fnv_32a_str().
1642
 *
1643
 ***
1644
 *
1645
 * Please do not copyright this code.  This code is in the public domain.
1646
 *
1647
 * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
1648
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
1649
 * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
1650
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
1651
 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
1652
 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
1653
 * PERFORMANCE OF THIS SOFTWARE.
1654
 *
1655
 * By:
1656
 *	chongo <Landon Curt Noll> /\oo/\
1657
 *      http://www.isthe.com/chongo/
1658
 *
1659
 * Share and Enjoy!	:-)
1660
 */
1661 1606

  
1662
/*
1663
 * 32 bit FNV-1 and FNV-1a non-zero initial basis
1664
 *
1665
 * The FNV-1 initial basis is the FNV-0 hash of the following 32 octets:
1666
 *
1667
 *              chongo <Landon Curt Noll> /\../\
1668
 *
1669
 * NOTE: The \'s above are not back-slashing escape characters.
1670
 * They are literal ASCII  backslash 0x5c characters.
1671
 *
1672
 * NOTE: The FNV-1a initial basis is the same value as FNV-1 by definition.
1673
 */
1674
#define FNV1_32A_INIT 0x811c9dc5
1607

1608

  
1609
/* Copyright (c) 2011 Google, Inc.
1610
  
1611
   Permission is hereby granted, free of charge, to any person obtaining a copy
1612
   of this software and associated documentation files (the "Software"), to deal
1613
   in the Software without restriction, including without limitation the rights
1614
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1615
   copies of the Software, and to permit persons to whom the Software is
1616
   furnished to do so, subject to the following conditions:
1617
  
1618
   The above copyright notice and this permission notice shall be included in
1619
   all copies or substantial portions of the Software.
1620
  
1621
   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1622
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1623
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1624
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1625
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1626
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1627
   THE SOFTWARE.
1628
  
1629
   CityHash, by Geoff Pike and Jyrki Alakuijala
1630
  
1631
   This file provides CityHash64() and related functions.
1632
  
1633
   It's probably possible to create even faster hash functions by
1634
   writing a program that systematically explores some of the space of
1635
   possible hash functions, by using SIMD instructions, or by
1636
   compromising on hash quality.  */
1637

  
/* Low 64-bit half of a pseudo-uint128 carried as the pair (x, y). */
static inline uint64_t
Uint128Low64(uint64_t x, uint64_t y)
{
    (void) y;   /* high half unused here */
    return x;
}
/* High 64-bit half of a pseudo-uint128 carried as the pair (x, y). */
static inline uint64_t
Uint128High64(uint64_t x, uint64_t y)
{
    (void) x;   /* low half unused here */
    return y;
}
1640

  
1641
/* Hash 128 input bits (given as two 64-bit halves) down to 64 bits of
   output.  This is intended to be a reasonably good hash function.
   Murmur-inspired: multiply by a large odd constant, then fold the high
   bits back in with a shift-xor, twice.
   Note: Uint128Low64(first, second) == first and
   Uint128High64(first, second) == second, so those helpers are inlined
   here. */
static inline uint64_t
Hash128to64(uint64_t first, uint64_t second) {
    const uint64_t kMul = 0x9ddfea08eb382d69ULL;
    uint64_t a = (first ^ second) * kMul;
    uint64_t b;

    a ^= (a >> 47);
    b = (second ^ a) * kMul;
    b ^= (b >> 47);
    b *= kMul;
    return b;
}
1655

  
1656
/* Read a 64-bit word from p without requiring alignment.  memcpy is the
   portable, strict-aliasing-safe way to do this; compilers lower it to a
   single load on targets that allow unaligned access. */
static uint64_t
UNALIGNED_LOAD64(const char *p) {
    uint64_t v;

    memcpy(&v, p, sizeof v);
    return v;
}
1675 1663

  
1676
/*
1677
 * 32 bit magic FNV-1a prime
1678
 */
1679
#define FNV_32_PRIME 0x01000193
1664
/* Read a 32-bit word from p without requiring alignment (see
   UNALIGNED_LOAD64). */
static uint32_t
UNALIGNED_LOAD32(const char *p) {
    uint32_t v;

    memcpy(&v, p, sizeof v);
    return v;
}
1680 1671

  
1681
#ifdef ST_USE_FNV1
1682
static st_index_t
1683
strhash(st_data_t arg)
1684
{
1685
    register const char *string = (const char *)arg;
1686
    register st_index_t hval = FNV1_32A_INIT;
1672
#ifndef __BIG_ENDIAN__
1687 1673

  
1688
    /*
1689
     * FNV-1a hash each octet in the buffer
1690
     */
1691
    while (*string) {
1692
	/* xor the bottom with the current octet */
1693
	hval ^= (unsigned int)*string++;
1674
#define uint32_in_expected_order(x) (x)
1675
#define uint64_in_expected_order(x) (x)
1694 1676

  
1695
	/* multiply by the 32 bit FNV magic prime mod 2^32 */
1696
	hval *= FNV_32_PRIME;
1697
    }
1698
    return hval;
1699
}
1700 1677
#else
1701 1678

  
1702
#if !defined(UNALIGNED_WORD_ACCESS) && defined(__GNUC__) && __GNUC__ >= 6
1703
# define UNALIGNED_WORD_ACCESS 0
1704
#endif
1679
#ifdef _MSC_VER
/* MSVC byte-swap intrinsics live in <stdlib.h>. */
#include <stdlib.h>
#define bswap_32(x) _byteswap_ulong(x)
/* Fixed: the MSVC intrinsic is _byteswap_uint64; "_byteswap_uint64_t"
   does not exist and would fail to link/compile on MSVC. */
#define bswap_64(x) _byteswap_uint64(x)
1705 1683

  
1706
#ifndef UNALIGNED_WORD_ACCESS
1707
# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
1708
     defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \
1709
     defined(__powerpc64__) || \
1710
     defined(__mc68020__)
1711
#   define UNALIGNED_WORD_ACCESS 1
1712
# endif
1713
#endif
1714
#ifndef UNALIGNED_WORD_ACCESS
1715
# define UNALIGNED_WORD_ACCESS 0
1716
#endif
1684
#elif defined(__APPLE__)
1685
/* Mac OS X / Darwin features: */
1686
#include <libkern/OSByteOrder.h>
1687
#define bswap_32(x) OSSwapInt32(x)
1688
#define bswap_64(x) OSSwapInt64(x)
1717 1689

  
1718
/* MurmurHash described in http://murmurhash.googlepages.com/ */
1719
#ifndef MURMUR
1720
#define MURMUR 2
1690
#else
1691
#include <byteswap.h>
1721 1692
#endif
1722 1693

  
1723
#define MurmurMagic_1 (st_index_t)0xc6a4a793
1724
#define MurmurMagic_2 (st_index_t)0x5bd1e995
1725
#if MURMUR == 1
1726
#define MurmurMagic MurmurMagic_1
1727
#elif MURMUR == 2
1728
#if SIZEOF_ST_INDEX_T > 4
1729
#define MurmurMagic ((MurmurMagic_1 << 32) | MurmurMagic_2)
1694
#define uint32_in_expected_order(x) (bswap_32(x))
1695
#define uint64_in_expected_order(x) (bswap_64(x))
1696

  
1697
#endif  /* __BIG_ENDIAN__ */
1698

  
1699
#if !defined(LIKELY)
1700
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
1701
#define LIKELY(x) (__builtin_expect(!!(x), 1))
1730 1702
#else
1731
#define MurmurMagic MurmurMagic_2
1703
#define LIKELY(x) (x)
1732 1704
#endif
1733 1705
#endif
1734 1706

  
1735
static inline st_index_t
1736
murmur(st_index_t h, st_index_t k, int r)
1737
{
1738
    const st_index_t m = MurmurMagic;
1739
#if MURMUR == 1
1740
    h += k;
1741
    h *= m;
1742
    h ^= h >> r;
1743
#elif MURMUR == 2
1744
    k *= m;
1745
    k ^= k >> r;
1746
    k *= m;
1747

  
1748
    h *= m;
1749
    h ^= k;
1750
#endif
1751
    return h;
1707
static uint64_t
1708
Fetch64(const char *p) {
1709
    return uint64_in_expected_order(UNALIGNED_LOAD64(p));
1752 1710
}
1753 1711

  
1754
static inline st_index_t
1755
murmur_finish(st_index_t h)
1756
{
1757
#if MURMUR == 1
1758
    h = murmur(h, 0, 10);
1759
    h = murmur(h, 0, 17);
1760
#elif MURMUR == 2
1761
    h ^= h >> 13;
1762
    h *= MurmurMagic;
1763
    h ^= h >> 15;
1764
#endif
1765
    return h;
1712
static uint32_t
1713
Fetch32(const char *p) {
1714
    return uint32_in_expected_order(UNALIGNED_LOAD32(p));
1766 1715
}
1767 1716

  
1768
#define murmur_step(h, k) murmur((h), (k), 16)
1717
/* Some primes between 2^63 and 2^64 for various uses. */
1718
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
1719
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
1720
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
1721
static const uint64_t k3 = 0xc949d7c7509e6557ULL;
1769 1722

  
1770
#if MURMUR == 1
1771
#define murmur1(h) murmur_step((h), 16)
1772
#else
1773
#define murmur1(h) murmur_step((h), 24)
1774
#endif
1775

  
1776
st_index_t
1777
st_hash(const void *ptr, size_t len, st_index_t h)
1778
{
1779
    const char *data = ptr;
1780
    st_index_t t = 0;
1781

  
1782
    h += 0xdeadbeef;
1783

  
1784
#define data_at(n) (st_index_t)((unsigned char)data[(n)])
1785
#define UNALIGNED_ADD_4 UNALIGNED_ADD(2); UNALIGNED_ADD(1); UNALIGNED_ADD(0)
1786
#if SIZEOF_ST_INDEX_T > 4
1787
#define UNALIGNED_ADD_8 UNALIGNED_ADD(6); UNALIGNED_ADD(5); UNALIGNED_ADD(4); UNALIGNED_ADD(3); UNALIGNED_ADD_4
1788
#if SIZEOF_ST_INDEX_T > 8
1789
#define UNALIGNED_ADD_16 UNALIGNED_ADD(14); UNALIGNED_ADD(13); UNALIGNED_ADD(12); UNALIGNED_ADD(11); \
1790
    UNALIGNED_ADD(10); UNALIGNED_ADD(9); UNALIGNED_ADD(8); UNALIGNED_ADD(7); UNALIGNED_ADD_8
1791
#define UNALIGNED_ADD_ALL UNALIGNED_ADD_16
1792
#endif
1793
#define UNALIGNED_ADD_ALL UNALIGNED_ADD_8
1794
#else
1795
#define UNALIGNED_ADD_ALL UNALIGNED_ADD_4
1796
#endif
1797
    if (len >= sizeof(st_index_t)) {
1798
#if !UNALIGNED_WORD_ACCESS
1799
	int align = (int)((st_data_t)data % sizeof(st_index_t));
1800
	if (align) {
1801
	    st_index_t d = 0;
1802
	    int sl, sr, pack;
1803

  
1804
	    switch (align) {
1805
#ifdef WORDS_BIGENDIAN
1806
# define UNALIGNED_ADD(n) case SIZEOF_ST_INDEX_T - (n) - 1: \
1807
		t |= data_at(n) << CHAR_BIT*(SIZEOF_ST_INDEX_T - (n) - 2)
1808
#else
1809
# define UNALIGNED_ADD(n) case SIZEOF_ST_INDEX_T - (n) - 1:	\
1810
		t |= data_at(n) << CHAR_BIT*(n)
1811
#endif
1812
		UNALIGNED_ADD_ALL;
1813
#undef UNALIGNED_ADD
1814
	    }
1815

  
1816
#ifdef WORDS_BIGENDIAN
1817
	    t >>= (CHAR_BIT * align) - CHAR_BIT;
1818
#else
1819
	    t <<= (CHAR_BIT * align);
1820
#endif
1821

  
1822
	    data += sizeof(st_index_t)-align;
1823
	    len -= sizeof(st_index_t)-align;
1723
/* Bitwise right rotate.  Normally this will compile to a single
   instruction, especially if the shift is a manifest constant.
   shift == 0 is special-cased because shifting a 64-bit value by 64
   is undefined behavior in C. */
static uint64_t
Rotate(uint64_t val, int shift) {
  if (shift == 0)
    return val;
  return (val >> shift) | (val << (64 - shift));
}
1824 1730

  
1825
	    sl = CHAR_BIT * (SIZEOF_ST_INDEX_T-align);
1826
	    sr = CHAR_BIT * align;
1731
/* Equivalent to Rotate(), but requires the second arg to be non-zero
   (so no special case for a 64-bit shift is needed).  On x86-64, and
   probably others, it's possible for this to compile to a single
   instruction if both args are already in registers. */
static uint64_t
RotateByAtLeast1(uint64_t val, int shift) {
    uint64_t lo = val >> shift;
    uint64_t hi = val << (64 - shift);
    return lo | hi;
}
1827 1738

  
1828
	    while (len >= sizeof(st_index_t)) {
1829
		d = *(st_index_t *)data;
1830
#ifdef WORDS_BIGENDIAN
1831
		t = (t << sr) | (d >> sl);
1832
#else
1833
		t = (t >> sr) | (d << sl);
1834
#endif
1835
		h = murmur_step(h, t);
1836
		t = d;
1837
		data += sizeof(st_index_t);
1838
		len -= sizeof(st_index_t);
1839
	    }
1739
/* Fold the high bits of val back into the low bits (shift-xor mix). */
static uint64_t
ShiftMix(uint64_t val) {
    uint64_t folded = val >> 47;
    return val ^ folded;
}
1840 1743

  
1841
	    pack = len < (size_t)align ? (int)len : align;
1842
	    d = 0;
1843
	    switch (pack) {
1844
#ifdef WORDS_BIGENDIAN
1845
# define UNALIGNED_ADD(n) case (n) + 1: \
1846
		d |= data_at(n) << CHAR_BIT*(SIZEOF_ST_INDEX_T - (n) - 1)
1847
#else
1848
# define UNALIGNED_ADD(n) case (n) + 1: \
1849
		d |= data_at(n) << CHAR_BIT*(n)
1850
#endif
1851
		UNALIGNED_ADD_ALL;
1852
#undef UNALIGNED_ADD
1853
	    }
1854
#ifdef WORDS_BIGENDIAN
1855
	    t = (t << sr) | (d >> sl);
1856
#else
1857
	    t = (t >> sr) | (d << sl);
1858
#endif
1744
/* Hash two 64-bit words (16 bytes) down to a single 64-bit value. */
static uint64_t
HashLen16(uint64_t u, uint64_t v) {
    uint64_t mixed = Hash128to64(u, v);
    return mixed;
}
1859 1748

  
1860
#if MURMUR == 2
1861
	    if (len < (size_t)align) goto skip_tail;
1862
#endif
1863
	    h = murmur_step(h, t);
1864
	    data += pack;
1865
	    len -= pack;
1866
	}
1867
	else
1868
#endif
1869
	{
1870
	    do {
1871
		h = murmur_step(h, *(st_index_t *)data);
1872
		data += sizeof(st_index_t);
1873
		len -= sizeof(st_index_t);
1874
	    } while (len >= sizeof(st_index_t));
1749
static uint64_t
1750
HashLen0to16(const char *s, size_t len) {
1751
    if (len > 8) {
1752
        uint64_t a = Fetch64(s);
1753
	uint64_t b = Fetch64(s + len - 8);
1754
	return HashLen16(a, RotateByAtLeast1(b + len, len)) ^ b;
1755
    }
1756
    if (len >= 4) {
1757
        uint64_t a = Fetch32(s);
1758
	return HashLen16(len + (a << 3), Fetch32(s + len - 4));
1759
    }
1760
    if (len > 0) {
1761
        uint8_t a = s[0];
1762
	uint8_t b = s[len >> 1];
1763
	uint8_t c = s[len - 1];
1764
	uint32_t y = ((uint32_t)(a) + (uint32_t)(b) << 8);
1765
	uint32_t z = len + ((uint32_t)(c) << 2);
1766
	return ShiftMix(y * k2 ^ z * k3) * k2;
1767
    }
1768
    return k2;
1769
}
1770

  
1771
/* This probably works well for 16-byte strings as well, but it may be
1772
   overkill in that case.  */
1773
static uint64_t
1774
HashLen17to32(const char *s, size_t len) {
1775
    uint64_t a = Fetch64(s) * k1;
1776
    uint64_t b = Fetch64(s + 8);
1777
    uint64_t c = Fetch64(s + len - 8) * k2;
1778
    uint64_t d = Fetch64(s + len - 16) * k0;
1779
    return HashLen16(Rotate(a - b, 43) + Rotate(c, 30) + d,
1780
		     a + Rotate(b ^ k3, 20) - c + len);
1781
}
1782

  
1783
typedef struct pair64 {uint64_t first, second;} pair64;
1784

  
1785
/* Return a 16-byte hash for 48 bytes.  Quick and dirty.
1786
   Callers do best to use "random-looking" values for a and b.  */
1787
static pair64
1788
WeakHashLen32WithSeeds0(uint64_t w, uint64_t x, uint64_t y, uint64_t z,
1789
			uint64_t a, uint64_t b) {
1790
    pair64 res;
1791
    uint64_t c;
1792
    a += w;
1793
    b = Rotate(b + a + z, 21);
1794
    c = a;
1795
    a += x;
1796
    a += y;
1797
    b += Rotate(a, 44);
1798
    res.first = a + z; res.second = b + c;
1799
    return res;
1800
}
1801

  
1802
/* Return a 16-byte hash for s[0] ... s[31], a, and b.  Quick and dirty.  */
1803
static pair64
1804
WeakHashLen32WithSeeds(const char* s, uint64_t a, uint64_t b) {
1805
    return WeakHashLen32WithSeeds0(Fetch64(s),
1806
				   Fetch64(s + 8),
1807
				   Fetch64(s + 16),
1808
				   Fetch64(s + 24),
1809
				   a,
1810
				   b);
1811
}
1812

  
1813
/* Return an 8-byte hash for 33 to 64 bytes.  */
1814
static uint64_t
1815
HashLen33to64(const char *s, size_t len) {
1816
    uint64_t z = Fetch64(s + 24);
1817
    uint64_t a = Fetch64(s) + (len + Fetch64(s + len - 16)) * k0;
1818
    uint64_t b = Rotate(a + z, 52);
1819
    uint64_t c = Rotate(a, 37);
1820
    uint64_t vf, vs, wf, ws, r;
1821
    
1822
    a += Fetch64(s + 8);
1823
    c += Rotate(a, 7);
1824
    a += Fetch64(s + 16);
1825
    vf = a + z;
1826
    vs = b + Rotate(a, 31) + c;
1827
    a = Fetch64(s + 16) + Fetch64(s + len - 32);
1828
    z = Fetch64(s + len - 8);
1829
    b = Rotate(a + z, 52);
1830
    c = Rotate(a, 37);
1831
    a += Fetch64(s + len - 24);
1832
    c += Rotate(a, 7);
1833
    a += Fetch64(s + len - 16);
1834
    wf = a + z;
1835
    ws = b + Rotate(a, 31) + c;
1836
    r = ShiftMix((vf + ws) * k2 + (wf + vs) * k0);
1837
    return ShiftMix(r * k0 + vs) * k2;
1838
}
1839

  
1840
static uint64_t
1841
CityHash64(const char *s, size_t len) {
1842
    uint64_t x, y, z, t;
1843
    pair64 v, w;
1844
    if (len <= 32) {
1845
        if (len <= 16) {
1846
	    return HashLen0to16(s, len);
1847
	} else {
1848
	    return HashLen17to32(s, len);
1875 1849
	}
1850
    } else if (len <= 64) {
1851
        return HashLen33to64(s, len);
1876 1852
    }
1853
    
1854
    /* For strings over 64 bytes we hash the end first, and then as we
1855
       loop we keep 56 bytes of state: v, w, x, y, and z.  */
1856
    x = Fetch64(s + len - 40);
1857
    y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
1858
    z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
1859
    v = WeakHashLen32WithSeeds(s + len - 64, len, z);
1860
    w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
1861
    x = x * k1 + Fetch64(s);
1862
    
1863
    /* Decrease len to the nearest multiple of 64, and operate on
1864
       64-byte chunks.  */
1865
    len = (len - 1) & ~(size_t)(63);
1866
    do {
1867
        x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
1868
	y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
1869
	x ^= w.second;
1870
	y += v.first + Fetch64(s + 40);
1871
	z = Rotate(z + w.first, 33) * k1;
1872
	v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
1873
	w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
1874
	t = x; x = z; z = t;
1875
	s += 64;
1876
	len -= 64;
1877
    } while (len != 0);
1878
    return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
1879
		     HashLen16(v.second, w.second) + x);
1880
}
1877 1881

  
1878
    t = 0;
1879
    switch (len) {
1880
#ifdef WORDS_BIGENDIAN
1881
# define UNALIGNED_ADD(n) case (n) + 1: \
1882
	t |= data_at(n) << CHAR_BIT*(SIZEOF_ST_INDEX_T - (n) - 1)
1883
#else
1884
# define UNALIGNED_ADD(n) case (n) + 1: \
1885
	t |= data_at(n) << CHAR_BIT*(n)
1886
#endif
1887
	UNALIGNED_ADD_ALL;
1888
#undef UNALIGNED_ADD
1889
#if MURMUR == 1
1890
	h = murmur_step(h, t);
1891
#elif MURMUR == 2
1892
# if !UNALIGNED_WORD_ACCESS
1893
      skip_tail:
1894
# endif
1895
	h ^= t;
1896
	h *= MurmurMagic;
1897
#endif
1898
    }
1899 1882

  
1900
    return murmur_finish(h);
1883
/* Two-seed variant of CityHash64: folds seed0 and seed1 into the
   unseeded hash with one extra 128->64 mix. */
static uint64_t
CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0, uint64_t seed1) {
    uint64_t base = CityHash64(s, len);
    return HashLen16(base - seed0, seed1);
}
1902 1887

  
1888
static uint64_t
1889
CityHash64WithSeed(const char *s, size_t len, uint64_t seed) {
1890
    return CityHash64WithSeeds(s, len, k2, seed);
1891
}
1892

  
1893

1894

  
1903 1895
st_index_t
1904
st_hash_uint32(st_index_t h, uint32_t i)
1905
{
1906
    return murmur_step(h + i, 16);
1896
st_hash(const void *ptr, size_t len, st_index_t h) {
1897
    return CityHash64WithSeed(ptr, len, h);
1898
}
1899

  
1900
static st_index_t
1901
strhash(st_data_t arg) {
1902
    const char *string = (const char *)arg;
1903
    return CityHash64(string, strlen(string));
1907 1904
}
1908 1905

  
1909 1906
st_index_t
1910
st_hash_uint(st_index_t h, st_index_t i)
1911
{
1912
    st_index_t v = 0;
1913
    h += i;
1914
#ifdef WORDS_BIGENDIAN
1915
#if SIZEOF_ST_INDEX_T*CHAR_BIT > 12*8
1916
    v = murmur1(v + (h >> 12*8));
1917
#endif
1918
#if SIZEOF_ST_INDEX_T*CHAR_BIT > 8*8
1919
    v = murmur1(v + (h >> 8*8));
1920
#endif
1921
#if SIZEOF_ST_INDEX_T*CHAR_BIT > 4*8
1922
    v = murmur1(v + (h >> 4*8));
1923
#endif
1924
#endif
1925
    v = murmur1(v + h);
1926
#ifndef WORDS_BIGENDIAN
1927
#if SIZEOF_ST_INDEX_T*CHAR_BIT > 4*8
1928
    v = murmur1(v + (h >> 4*8));
1929
#endif
1930
#if SIZEOF_ST_INDEX_T*CHAR_BIT > 8*8
1931
    v = murmur1(v + (h >> 8*8));
1932
#endif
1933
#if SIZEOF_ST_INDEX_T*CHAR_BIT > 12*8
1934
    v = murmur1(v + (h >> 12*8));
1935
#endif
1936
#endif
1937
    return v;
1907
st_hash_uint(st_index_t h, st_index_t i) {
1908
    return CityHash64WithSeed((const char *) &i, sizeof (st_index_t), h);
1938 1909
}
1939 1910

  
1940 1911
st_index_t
1941
st_hash_end(st_index_t h)
1942
{
1943
    h = murmur_step(h, 10);
1944
    h = murmur_step(h, 17);
1912
st_hash_end(st_index_t h) {
1945 1913
    return h;
1946 1914
}
1947 1915

  
1948 1916
#undef st_hash_start
1917

  
1949 1918
st_index_t
1950
st_hash_start(st_index_t h)
1951
{
1919
st_hash_start(st_index_t h) {
1952 1920
    return h;
1953 1921
}
1954 1922

  
1955
static st_index_t
1956
strhash(st_data_t arg)
1957
{
1958
    register const char *string = (const char *)arg;
1959
    return st_hash(string, strlen(string), FNV1_32A_INIT);
1960
}
1961
#endif
1923
/*
1924
 * 32 bit FNV-1 and FNV-1a non-zero initial basis
1925
 *
1926
 * The FNV-1 initial basis is the FNV-0 hash of the following 32 octets:
1927
 *
1928
 *              chongo <Landon Curt Noll> /\../\
1929
 *
1930
 * NOTE: The \'s above are not back-slashing escape characters.
1931
 * They are literal ASCII  backslash 0x5c characters.
1932
 *
1933
 * NOTE: The FNV-1a initial basis is the same value as FNV-1 by definition.
1934
 */
1935
#define FNV1_32A_INIT 0x811c9dc5
1936

  
1937
/*
1938
 * 32 bit magic FNV-1a prime
1939
 */
1940
#define FNV_32_PRIME 0x01000193
1962 1941

  
1963 1942
int
1964 1943
st_locale_insensitive_strcasecmp(const char *s1, const char *s2)