/*
 * Idea here is very simple.
 *
 * We have total of (sz-N+1) N-byte overlapping sequences in buf whose
 * size is sz.  If the same N-byte sequence appears in both source and
 * destination, we say the byte that starts that sequence is shared
 * between them (i.e. copied from source to destination).
 *
 * For each possible N-byte sequence, if the source buffer has more
 * instances of it than the destination buffer, that means the
 * difference are the number of bytes not copied from source to
 * destination.  If the counts are the same, everything was copied
 * from source to destination.  If the destination has more,
 * everything was copied, and destination added more.
 *
 * We are doing an approximation so we do not really have to waste
 * memory by actually storing the sequence.  We just hash them into
 * somewhere around 2^16 hashbuckets and count the occurrences.
 *
 * The length of the sequence is arbitrarily set to 8 for now.
 */
/* Wild guess at the initial hash size: 2^10 buckets to start with */
#define INITIAL_HASH_SIZE 10
#define HASHBASE 65537 /* next_prime(2^16) */
32 unsigned long hashval;
38 struct spanhash data[FLEX_ARRAY];
41 static struct spanhash *spanhash_find(struct spanhash_top *top, unsigned long hashval)
43 int sz = 1 << top->alloc_log2;
44 int bucket = hashval & (sz - 1);
46 struct spanhash *h = &(top->data[bucket++]);
49 if (h->hashval == hashval)
56 static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
58 struct spanhash_top *new;
60 int osz = 1 << orig->alloc_log2;
63 new = xmalloc(sizeof(*orig) + sizeof(struct spanhash) * sz);
64 new->alloc_log2 = orig->alloc_log2 + 1;
66 memset(new->data, 0, sizeof(struct spanhash) * sz);
67 for (i = 0; i < osz; i++) {
68 struct spanhash *o = &(orig->data[i]);
72 bucket = o->hashval & (sz - 1);
74 struct spanhash *h = &(new->data[bucket++]);
76 h->hashval = o->hashval;
89 static struct spanhash_top *add_spanhash(struct spanhash_top *top,
90 unsigned long hashval)
95 lim = (1 << top->alloc_log2);
96 bucket = hashval & (lim - 1);
98 h = &(top->data[bucket++]);
100 h->hashval = hashval;
104 return spanhash_rehash(top);
107 if (h->hashval == hashval) {
116 static struct spanhash_top *hash_chars(unsigned char *buf, unsigned long sz)
119 unsigned long accum1, accum2, hashval;
120 struct spanhash_top *hash;
122 i = INITIAL_HASH_SIZE;
123 hash = xmalloc(sizeof(*hash) + sizeof(struct spanhash) * (1<<i));
124 hash->alloc_log2 = i;
125 hash->free = (1<<i)/2;
126 memset(hash->data, 0, sizeof(struct spanhash) * (1<<i));
128 /* an 8-byte shift register made of accum1 and accum2. New
129 * bytes come at LSB of accum2, and shifted up to accum1
131 for (i = accum1 = accum2 = 0; i < 7; i++, sz--) {
132 accum1 = (accum1 << 8) | (accum2 >> 24);
133 accum2 = (accum2 << 8) | *buf++;
136 accum1 = (accum1 << 8) | (accum2 >> 24);
137 accum2 = (accum2 << 8) | *buf++;
138 hashval = (accum1 + accum2 * 0x61) % HASHBASE;
139 hash = add_spanhash(hash, hashval);
145 int diffcore_count_changes(void *src, unsigned long src_size,
146 void *dst, unsigned long dst_size,
149 unsigned long delta_limit,
150 unsigned long *src_copied,
151 unsigned long *literal_added)
154 struct spanhash_top *src_count, *dst_count;
155 unsigned long sc, la;
157 if (src_size < 8 || dst_size < 8)
160 src_count = dst_count = NULL;
162 src_count = *src_count_p;
164 src_count = hash_chars(src, src_size);
166 *src_count_p = src_count;
169 dst_count = *dst_count_p;
171 dst_count = hash_chars(dst, dst_size);
173 *dst_count_p = dst_count;
177 ssz = 1 << src_count->alloc_log2;
178 for (i = 0; i < ssz; i++) {
179 struct spanhash *s = &(src_count->data[i]);
181 unsigned dst_cnt, src_cnt;
185 d = spanhash_find(dst_count, s->hashval);
186 dst_cnt = d ? d->cnt : 0;
187 if (src_cnt < dst_cnt) {
188 la += dst_cnt - src_cnt;