From 2834bc27c16e4ae103768dd6dda530f0e46116af Mon Sep 17 00:00:00 2001
From: Vicent Marti
Date: Thu, 24 Oct 2013 14:01:06 -0400
Subject: pack-objects: refactor the packing list

The hash table that stores the packing list for a given `pack-objects`
run was tightly coupled to the pack-objects code.

In this commit, we refactor the hash table and the underlying storage
array into a `packing_data` struct. The functionality for accessing and
adding entries to the packing list is hence accessible from other parts
of Git besides the `pack-objects` builtin.

This refactoring is a requirement for further patches in this series
that will require accessing the commit packing list from outside of
`pack-objects`.

The hash table implementation has been minimally altered: we now use
table sizes which are always a power of two, to ensure a uniform index
distribution in the array.

Signed-off-by: Vicent Marti
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 builtin/pack-objects.c | 175 +++++++++++--------------------------------------
 1 file changed, 40 insertions(+), 135 deletions(-)

(limited to 'builtin/pack-objects.c')

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 36273dd6f0..f3f0cf95a8 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -14,6 +14,7 @@
 #include "diff.h"
 #include "revision.h"
 #include "list-objects.h"
+#include "pack-objects.h"
 #include "progress.h"
 #include "refs.h"
 #include "streaming.h"
@@ -25,42 +26,15 @@ static const char *pack_usage[] = {
 	NULL
 };

-struct object_entry {
-	struct pack_idx_entry idx;
-	unsigned long size;	/* uncompressed size */
-	struct packed_git *in_pack;	/* already in pack */
-	off_t in_pack_offset;
-	struct object_entry *delta;	/* delta base object */
-	struct object_entry *delta_child;	/* deltified objects who bases me */
-	struct object_entry *delta_sibling;	/* other deltified objects who
-						 * uses the same base as me
-						 */
-	void *delta_data;	/* cached delta (uncompressed) */
-	unsigned long delta_size;	/* delta data size (uncompressed) */
-	unsigned long z_delta_size;	/* delta data size (compressed) */
-	enum object_type type;
-	enum object_type in_pack_type;	/* could be delta */
-	uint32_t hash;	/* name hint hash */
-	unsigned char in_pack_header_size;
-	unsigned preferred_base:1; /*
-				    * we do not pack this, but is available
-				    * to be used as the base object to delta
-				    * objects against.
-				    */
-	unsigned no_try_delta:1;
-	unsigned tagged:1; /* near the very tip of refs */
-	unsigned filled:1; /* assigned write-order */
-};
-
 /*
- * Objects we are going to pack are collected in objects array (dynamically
- * expanded). nr_objects & nr_alloc controls this array. They are stored
- * in the order we see -- typically rev-list --objects order that gives us
- * nice "minimum seek" order.
+ * Objects we are going to pack are collected in the `to_pack` structure.
+ * It contains an array (dynamically expanded) of the object data, and a map
+ * that can resolve SHA1s to their position in the array.
  */
-static struct object_entry *objects;
+static struct packing_data to_pack;
+
 static struct pack_idx_entry **written_list;
-static uint32_t nr_objects, nr_alloc, nr_result, nr_written;
+static uint32_t nr_result, nr_written;

 static int non_empty;
 static int reuse_delta = 1, reuse_object = 1;
@@ -89,22 +63,12 @@ static unsigned long cache_max_small_delta_size = 1000;

 static unsigned long window_memory_limit = 0;

-/*
- * The object names in objects array are hashed with this hashtable,
- * to help looking up the entry by object name.
- * This hashtable is built after all the objects are seen.
- */
-static int *object_ix;
-static int object_ix_hashsz;
-static struct object_entry *locate_object_entry(const unsigned char *sha1);
-
 /*
  * stats
  */
 static uint32_t written, written_delta;
 static uint32_t reused, reused_delta;
-
 static void *get_delta(struct object_entry *entry)
 {
 	unsigned long size, base_size, delta_size;
@@ -553,12 +517,12 @@ static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
 		       void *cb_data)
 {
 	unsigned char peeled[20];
-	struct object_entry *entry = locate_object_entry(sha1);
+	struct object_entry *entry = packlist_find(&to_pack, sha1, NULL);

 	if (entry)
 		entry->tagged = 1;
 	if (!peel_ref(path, peeled)) {
-		entry = locate_object_entry(peeled);
+		entry = packlist_find(&to_pack, peeled, NULL);
 		if (entry)
 			entry->tagged = 1;
 	}
@@ -633,9 +597,10 @@ static struct object_entry **compute_write_order(void)
 {
 	unsigned int i, wo_end, last_untagged;

-	struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));
+	struct object_entry **wo = xmalloc(to_pack.nr_objects * sizeof(*wo));
+	struct object_entry *objects = to_pack.objects;

-	for (i = 0; i < nr_objects; i++) {
+	for (i = 0; i < to_pack.nr_objects; i++) {
 		objects[i].tagged = 0;
 		objects[i].filled = 0;
 		objects[i].delta_child = NULL;
@@ -647,7 +612,7 @@ static struct object_entry **compute_write_order(void)
 	 * Make sure delta_sibling is sorted in the original
 	 * recency order.
 	 */
-	for (i = nr_objects; i > 0;) {
+	for (i = to_pack.nr_objects; i > 0;) {
 		struct object_entry *e = &objects[--i];
 		if (!e->delta)
 			continue;
@@ -665,7 +630,7 @@ static struct object_entry **compute_write_order(void)
 	 * Give the objects in the original recency order until
 	 * we see a tagged tip.
 	 */
-	for (i = wo_end = 0; i < nr_objects; i++) {
+	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
 		if (objects[i].tagged)
 			break;
 		add_to_write_order(wo, &wo_end, &objects[i]);
@@ -675,7 +640,7 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * Then fill all the tagged tips.
 	 */
-	for (; i < nr_objects; i++) {
+	for (; i < to_pack.nr_objects; i++) {
 		if (objects[i].tagged)
 			add_to_write_order(wo, &wo_end, &objects[i]);
 	}
@@ -683,7 +648,7 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * And then all remaining commits and tags.
 	 */
-	for (i = last_untagged; i < nr_objects; i++) {
+	for (i = last_untagged; i < to_pack.nr_objects; i++) {
 		if (objects[i].type != OBJ_COMMIT &&
 		    objects[i].type != OBJ_TAG)
 			continue;
@@ -693,7 +658,7 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * And then all the trees.
	 */
-	for (i = last_untagged; i < nr_objects; i++) {
+	for (i = last_untagged; i < to_pack.nr_objects; i++) {
 		if (objects[i].type != OBJ_TREE)
 			continue;
 		add_to_write_order(wo, &wo_end, &objects[i]);
@@ -702,13 +667,13 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * Finally all the rest in really tight order
 	 */
-	for (i = last_untagged; i < nr_objects; i++) {
+	for (i = last_untagged; i < to_pack.nr_objects; i++) {
 		if (!objects[i].filled)
 			add_family_to_write_order(wo, &wo_end, &objects[i]);
 	}

-	if (wo_end != nr_objects)
-		die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);
+	if (wo_end != to_pack.nr_objects)
+		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

 	return wo;
 }
@@ -724,7 +689,7 @@ static void write_pack_file(void)
 	if (progress > pack_to_stdout)
 		progress_state = start_progress("Writing objects", nr_result);
-	written_list = xmalloc(nr_objects * sizeof(*written_list));
+	written_list = xmalloc(to_pack.nr_objects * sizeof(*written_list));
 	write_order = compute_write_order();

 	do {
@@ -740,7 +705,7 @@ static void write_pack_file(void)
 		if (!offset)
 			die_errno("unable to write pack header");
 		nr_written = 0;
-		for (; i < nr_objects; i++) {
+		for (; i < to_pack.nr_objects; i++) {
 			struct object_entry *e = write_order[i];
 			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
 				break;
@@ -803,7 +768,7 @@ static void write_pack_file(void)
 			written_list[j]->offset = (off_t)-1;
 		}
 		nr_remaining -= nr_written;
-	} while (nr_remaining && i < nr_objects);
+	} while (nr_remaining && i < to_pack.nr_objects);

 	free(written_list);
 	free(write_order);
@@ -813,53 +778,6 @@ static void write_pack_file(void)
 			written, nr_result);
 }

-static int locate_object_entry_hash(const unsigned char *sha1)
-{
-	int i;
-	unsigned int ui;
-	memcpy(&ui, sha1, sizeof(unsigned int));
-	i = ui % object_ix_hashsz;
-	while (0 < object_ix[i]) {
-		if (!hashcmp(sha1, objects[object_ix[i] - 1].idx.sha1))
-			return i;
-		if (++i == object_ix_hashsz)
-			i = 0;
-	}
-	return -1 - i;
-}
-
-static struct object_entry *locate_object_entry(const unsigned char *sha1)
-{
-	int i;
-
-	if (!object_ix_hashsz)
-		return NULL;
-
-	i = locate_object_entry_hash(sha1);
-	if (0 <= i)
-		return &objects[object_ix[i]-1];
-	return NULL;
-}
-
-static void rehash_objects(void)
-{
-	uint32_t i;
-	struct object_entry *oe;
-
-	object_ix_hashsz = nr_objects * 3;
-	if (object_ix_hashsz < 1024)
-		object_ix_hashsz = 1024;
-	object_ix = xrealloc(object_ix, sizeof(int) * object_ix_hashsz);
-	memset(object_ix, 0, sizeof(int) * object_ix_hashsz);
-	for (i = 0, oe = objects; i < nr_objects; i++, oe++) {
-		int ix = locate_object_entry_hash(oe->idx.sha1);
-		if (0 <= ix)
-			continue;
-		ix = -1 - ix;
-		object_ix[ix] = i + 1;
-	}
-}
-
 static uint32_t name_hash(const char *name)
 {
 	uint32_t c, hash = 0;
@@ -908,13 +826,12 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 	struct object_entry *entry;
 	struct packed_git *p, *found_pack = NULL;
 	off_t found_offset = 0;
-	int ix;
 	uint32_t hash = name_hash(name);
+	uint32_t index_pos;

-	ix = nr_objects ? locate_object_entry_hash(sha1) : -1;
-	if (ix >= 0) {
+	entry = packlist_find(&to_pack, sha1, &index_pos);
+	if (entry) {
 		if (exclude) {
-			entry = objects + object_ix[ix] - 1;
 			if (!entry->preferred_base)
 				nr_result--;
 			entry->preferred_base = 1;
 		}
@@ -947,14 +864,7 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 		}
 	}

-	if (nr_objects >= nr_alloc) {
-		nr_alloc = (nr_alloc + 1024) * 3 / 2;
-		objects = xrealloc(objects, nr_alloc * sizeof(*entry));
-	}
-
-	entry = objects + nr_objects++;
-	memset(entry, 0, sizeof(*entry));
-	hashcpy(entry->idx.sha1, sha1);
+	entry = packlist_alloc(&to_pack, sha1, index_pos);
 	entry->hash = hash;
 	if (type)
 		entry->type = type;
@@ -967,12 +877,7 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 		entry->in_pack_offset = found_offset;
 	}

-	if (object_ix_hashsz * 3 <= nr_objects * 4)
-		rehash_objects();
-	else
-		object_ix[-1 - ix] = nr_objects;
-
-	display_progress(progress_state, nr_objects);
+	display_progress(progress_state, to_pack.nr_objects);

 	if (name && no_try_delta(name))
 		entry->no_try_delta = 1;
@@ -1329,7 +1234,7 @@ static void check_object(struct object_entry *entry)
 			break;
 	}

-	if (base_ref && (base_entry = locate_object_entry(base_ref))) {
+	if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
 		/*
 		 * If base_ref was set above that means we wish to
 		 * reuse delta data, and we even found that base
@@ -1403,12 +1308,12 @@ static void get_object_details(void)
 	uint32_t i;
 	struct object_entry **sorted_by_offset;

-	sorted_by_offset = xcalloc(nr_objects, sizeof(struct object_entry *));
-	for (i = 0; i < nr_objects; i++)
-		sorted_by_offset[i] = objects + i;
-	qsort(sorted_by_offset, nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);
+	sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
+	for (i = 0; i < to_pack.nr_objects; i++)
+		sorted_by_offset[i] = to_pack.objects + i;
+	qsort(sorted_by_offset, to_pack.nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);

-	for (i = 0; i < nr_objects; i++) {
+	for (i = 0; i < to_pack.nr_objects; i++) {
 		struct object_entry *entry = sorted_by_offset[i];
 		check_object(entry);
 		if (big_file_threshold < entry->size)
@@ -2034,7 +1939,7 @@ static int add_ref_tag(const char *path, const unsigned char *sha1, int flag, vo
 	if (!prefixcmp(path, "refs/tags/") && /* is a tag? */
 	    !peel_ref(path, peeled) && /* peelable? */
-	    locate_object_entry(peeled)) /* object packed? */
+	    packlist_find(&to_pack, peeled, NULL)) /* object packed? */
 		add_object_entry(sha1, OBJ_TAG, NULL, 0);
 	return 0;
 }
@@ -2057,14 +1962,14 @@ static void prepare_pack(int window, int depth)
 	if (!pack_to_stdout)
 		do_check_packed_object_crc = 1;

-	if (!nr_objects || !window || !depth)
+	if (!to_pack.nr_objects || !window || !depth)
 		return;

-	delta_list = xmalloc(nr_objects * sizeof(*delta_list));
+	delta_list = xmalloc(to_pack.nr_objects * sizeof(*delta_list));
 	nr_deltas = n = 0;

-	for (i = 0; i < nr_objects; i++) {
-		struct object_entry *entry = objects + i;
+	for (i = 0; i < to_pack.nr_objects; i++) {
+		struct object_entry *entry = to_pack.objects + i;

 		if (entry->delta)
 			/* This happens if we decided to reuse existing
@@ -2342,7 +2247,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
 		for (i = 0; i < p->num_objects; i++) {
 			sha1 = nth_packed_object_sha1(p, i);
-			if (!locate_object_entry(sha1) &&
+			if (!packlist_find(&to_pack, sha1, NULL) &&
 			    !has_sha1_pack_kept_or_nonlocal(sha1))
 				if (force_object_loose(sha1, p->mtime))
 					die("unable to force loose object");
-- cgit v1.2.3

From 68fb36eb92ff98ec81a066592a07b3f411450a1d Mon Sep 17 00:00:00 2001
From: Vicent Marti
Date: Thu, 24 Oct 2013 14:01:29 -0400
Subject: pack-objects: factor out name_hash

As the pack-objects system grows beyond the single pack-objects.c file,
more parts (like the soon-to-exist bitmap code) will need to compute
hashes for matching deltas. Factor out name_hash to make it available
to other files.

Signed-off-by: Vicent Marti
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 builtin/pack-objects.c | 24 ++----------------------
 1 file changed, 2 insertions(+), 22 deletions(-)

(limited to 'builtin/pack-objects.c')

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index f3f0cf95a8..faf746b2a7 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -778,26 +778,6 @@ static void write_pack_file(void)
 			written, nr_result);
 }

-static uint32_t name_hash(const char *name)
-{
-	uint32_t c, hash = 0;
-
-	if (!name)
-		return 0;
-
-	/*
-	 * This effectively just creates a sortable number from the
-	 * last sixteen non-whitespace characters. Last characters
-	 * count "most", so things that end in ".c" sort together.
-	 */
-	while ((c = *name++) != 0) {
-		if (isspace(c))
-			continue;
-		hash = (hash >> 2) + (c << 24);
-	}
-	return hash;
-}
-
 static void setup_delta_attr_check(struct git_attr_check *check)
 {
 	static struct git_attr *attr_delta;
@@ -826,7 +806,7 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 	struct object_entry *entry;
 	struct packed_git *p, *found_pack = NULL;
 	off_t found_offset = 0;
-	uint32_t hash = name_hash(name);
+	uint32_t hash = pack_name_hash(name);
 	uint32_t index_pos;

 	entry = packlist_find(&to_pack, sha1, &index_pos);
@@ -1082,7 +1062,7 @@ static void add_preferred_base_object(const char *name)
{
 	struct pbase_tree *it;
 	int cmplen;
-	unsigned hash = name_hash(name);
+	unsigned hash = pack_name_hash(name);

 	if (!num_preferred_base || check_pbase_path(hash))
 		return;
-- cgit v1.2.3

From ce2bc42456b88c5f01f7f591cf0cc9db1a5bfc3d Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Sat, 21 Dec 2013 09:00:06 -0500
Subject: pack-objects: split add_object_entry

This function actually does three things:

  1. Check whether we've already added the object to our packing list.

  2. Check whether the object meets our criteria for adding.

  3. Actually add the object to our packing list.

It's a little hard to see these three phases, because they happen
linearly in the rather long function.
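To make the three phases easy to see at a glance, here is the
recomposed caller exactly as it stands at the end of the diff below;
the phase comments are added for this presentation and are not part of
the patch itself:

    static int add_object_entry(const unsigned char *sha1, enum object_type type,
                                const char *name, int exclude)
    {
            struct packed_git *found_pack;
            off_t found_offset;
            uint32_t index_pos;

            /* phase 1: skip objects already in the packing list */
            if (have_duplicate_entry(sha1, exclude, &index_pos))
                    return 0;

            /* phase 2: skip objects that fail our criteria for adding */
            if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset))
                    return 0;

            /* phase 3: actually add the object to the packing list */
            create_object_entry(sha1, type, pack_name_hash(name),
                                exclude, name && no_try_delta(name),
                                index_pos, found_pack, found_offset);

            display_progress(progress_state, to_pack.nr_objects);
            return 1;
    }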
Instead, this patch breaks them up into three separate helper
functions. The result is a little easier to follow, though it
unfortunately suffers from some optimization interdependencies between
the stages (e.g., during step 3 we use the packing list index from
step 1 and the packfile information from step 2). More importantly,
though, the various parts can be composed differently, as they will be
in the next patch.

Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 builtin/pack-objects.c | 98 +++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 78 insertions(+), 20 deletions(-)

(limited to 'builtin/pack-objects.c')

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index faf746b2a7..13b171d649 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -800,41 +800,69 @@ static int no_try_delta(const char *path)
 	return 0;
 }

-static int add_object_entry(const unsigned char *sha1, enum object_type type,
-			    const char *name, int exclude)
+/*
+ * When adding an object, check whether we have already added it
+ * to our packing list. If so, we can skip. However, if we are
+ * being asked to exclude it, but the previous mention was to include
+ * it, make sure to adjust its flags and tweak our numbers accordingly.
+ *
+ * As an optimization, we pass out the index position where we would have
+ * found the item, since that saves us from having to look it up again a
+ * few lines later when we want to add the new entry.
+ */
+static int have_duplicate_entry(const unsigned char *sha1,
+				int exclude,
+				uint32_t *index_pos)
 {
 	struct object_entry *entry;
-	struct packed_git *p, *found_pack = NULL;
-	off_t found_offset = 0;
-	uint32_t hash = pack_name_hash(name);
-	uint32_t index_pos;

-	entry = packlist_find(&to_pack, sha1, &index_pos);
-	if (entry) {
-		if (exclude) {
-			if (!entry->preferred_base)
-				nr_result--;
-			entry->preferred_base = 1;
-		}
+	entry = packlist_find(&to_pack, sha1, index_pos);
+	if (!entry)
 		return 0;
+
+	if (exclude) {
+		if (!entry->preferred_base)
+			nr_result--;
+		entry->preferred_base = 1;
 	}

+	return 1;
+}
+
+/*
+ * Check whether we want the object in the pack (e.g., we do not want
+ * objects found in non-local stores if the "--local" option was used).
+ *
+ * As a side effect of this check, we will find the packed version of this
+ * object, if any. We therefore pass out the pack information to avoid having
+ * to look it up again later.
+ */
+static int want_object_in_pack(const unsigned char *sha1,
+			       int exclude,
+			       struct packed_git **found_pack,
+			       off_t *found_offset)
+{
+	struct packed_git *p;
+
 	if (!exclude && local && has_loose_object_nonlocal(sha1))
 		return 0;

+	*found_pack = NULL;
+	*found_offset = 0;
+
 	for (p = packed_git; p; p = p->next) {
 		off_t offset = find_pack_entry_one(sha1, p);
 		if (offset) {
-			if (!found_pack) {
+			if (!*found_pack) {
 				if (!is_pack_valid(p)) {
 					warning("packfile %s cannot be accessed", p->pack_name);
 					continue;
 				}
-				found_offset = offset;
-				found_pack = p;
+				*found_offset = offset;
+				*found_pack = p;
 			}
 			if (exclude)
-				break;
+				return 1;
 			if (incremental)
 				return 0;
 			if (local && !p->pack_local)
@@ -844,6 +872,20 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 		}
 	}

+	return 1;
+}
+
+static void create_object_entry(const unsigned char *sha1,
+				enum object_type type,
+				uint32_t hash,
+				int exclude,
+				int no_try_delta,
+				uint32_t index_pos,
+				struct packed_git *found_pack,
+				off_t found_offset)
+{
+	struct object_entry *entry;
+
 	entry = packlist_alloc(&to_pack, sha1, index_pos);
 	entry->hash = hash;
 	if (type)
 		entry->type = type;
@@ -857,11 +899,27 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 		entry->in_pack_offset = found_offset;
 	}

-	display_progress(progress_state, to_pack.nr_objects);
+	entry->no_try_delta = no_try_delta;
+}
+
+static int add_object_entry(const unsigned char *sha1, enum object_type type,
+			    const char *name, int exclude)
+{
+	struct packed_git *found_pack;
+	off_t found_offset;
+	uint32_t index_pos;

-	if (name && no_try_delta(name))
-		entry->no_try_delta = 1;
+	if (have_duplicate_entry(sha1, exclude, &index_pos))
+		return 0;

+	if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset))
+		return 0;
+
+	create_object_entry(sha1, type, pack_name_hash(name),
+			    exclude, name && no_try_delta(name),
+			    index_pos, found_pack, found_offset);
+
+	display_progress(progress_state, to_pack.nr_objects);
 	return 1;
 }
-- cgit v1.2.3

From 6b8fda2db1d69606954711b606c546c0e4e51680 Mon Sep 17 00:00:00 2001
From: Vicent Marti
Date: Sat, 21 Dec 2013 09:00:09 -0500
Subject: pack-objects: use bitmaps when packing objects

In this patch, we use the bitmap API to perform the `Counting Objects`
phase in pack-objects, rather than a traditional walk through the
object graph. For a reasonably-packed large repo, the time to fetch and
clone is often dominated by the full-object revision walk during the
Counting Objects phase. Using bitmaps can reduce the CPU time required
on the server (and therefore start sending the actual pack data with
less delay).

For bitmaps to be used, the following must be true:

  1. We must be packing to stdout (as a normal `pack-objects` from
     `upload-pack` would do).

  2. There must be a .bitmap index containing at least one of the
     "have" objects that the client is asking for.

  3. Bitmaps must be enabled (they are enabled by default, but can be
     disabled by setting `pack.usebitmaps` to false, or by using
     `--no-use-bitmap-index` on the command-line).

If any of these is not true, we fall back to doing a normal walk of the
object graph.

Here are some sample timings from a full pack of `torvalds/linux` (i.e.
something very similar to what would be generated for a clone of the
repository) that show the speedup produced by various methods:

  [existing graph traversal]
  $ time git pack-objects --all --stdout --no-use-bitmap-index \
        </dev/null >/dev/null
  Counting objects: 3237103, done.
  Compressing objects: 100% (508752/508752), done.
  Total 3237103 (delta 2699584), reused 3237103 (delta 2699584)

  real    0m44.111s
  user    0m42.396s
  sys     0m3.544s

  [bitmaps only, without partial pack reuse; note that pack reuse
   is automatic, so timing this required a patch to disable it]
  $ time git pack-objects --all --stdout </dev/null >/dev/null
  Counting objects: 3237103, done.
  Compressing objects: 100% (508752/508752), done.
  Total 3237103 (delta 2699584), reused 3237103 (delta 2699584)

  real    0m5.413s
  user    0m5.604s
  sys     0m1.804s

  [bitmaps with pack reuse (what you get with this patch)]
  $ time git pack-objects --all --stdout </dev/null >/dev/null
  Reusing existing pack: 3237103, done.
  Total 3237103 (delta 0), reused 0 (delta 0)

  real    0m1.636s
  user    0m1.460s
  sys     0m0.172s

Signed-off-by: Vicent Marti
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 builtin/pack-objects.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)

(limited to 'builtin/pack-objects.c')

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 13b171d649..030d894083 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -19,6 +19,7 @@
 #include "refs.h"
 #include "streaming.h"
 #include "thread-utils.h"
+#include "pack-bitmap.h"

 static const char *pack_usage[] = {
 	N_("git pack-objects --stdout [options...] [< ref-list | < object-list]"),
@@ -57,6 +58,12 @@ static struct progress *progress_state;
 static int pack_compression_level = Z_DEFAULT_COMPRESSION;
 static int pack_compression_seen;

+static struct packed_git *reuse_packfile;
+static uint32_t reuse_packfile_objects;
+static off_t reuse_packfile_offset;
+
+static int use_bitmap_index = 1;
+
 static unsigned long delta_cache_size = 0;
 static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
 static unsigned long cache_max_small_delta_size = 1000;
@@ -678,6 +685,46 @@ static struct object_entry **compute_write_order(void)
 	return wo;
 }

+static off_t write_reused_pack(struct sha1file *f)
+{
+	unsigned char buffer[8192];
+	off_t to_write;
+	int fd;
+
+	if (!is_pack_valid(reuse_packfile))
+		die("packfile is invalid: %s", reuse_packfile->pack_name);
+
+	fd = git_open_noatime(reuse_packfile->pack_name);
+	if (fd < 0)
+		die_errno("unable to open packfile for reuse: %s",
+			  reuse_packfile->pack_name);
+
+	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
+		die_errno("unable to seek in reused packfile");
+
+	if (reuse_packfile_offset < 0)
+		reuse_packfile_offset = reuse_packfile->pack_size - 20;
+
+	to_write = reuse_packfile_offset - sizeof(struct pack_header);
+
+	while (to_write) {
+		int read_pack = xread(fd, buffer, sizeof(buffer));
+
+		if (read_pack <= 0)
+			die_errno("unable to read from reused packfile");
+
+		if (read_pack > to_write)
+			read_pack = to_write;
+
+		sha1write(f, buffer, read_pack);
+		to_write -= read_pack;
+	}
+
+	close(fd);
+	written += reuse_packfile_objects;
+	return reuse_packfile_offset - sizeof(struct pack_header);
+}
+
 static void write_pack_file(void)
 {
 	uint32_t i = 0, j;
@@ -704,6 +751,15 @@ static void write_pack_file(void)
 		offset = write_pack_header(f, nr_remaining);
 		if (!offset)
 			die_errno("unable to write pack header");
+
+		if (reuse_packfile) {
+			off_t packfile_size;
+			assert(pack_to_stdout);
+
+			packfile_size = write_reused_pack(f);
+			offset += packfile_size;
+		}
+
 		nr_written = 0;
 		for (; i < to_pack.nr_objects; i++) {
 			struct object_entry *e = write_order[i];
 			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
 				break;
@@ -923,6 +979,22 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 	return 1;
 }

+static int add_object_entry_from_bitmap(const unsigned char *sha1,
+					enum object_type type,
+					int flags, uint32_t name_hash,
+					struct packed_git *pack, off_t offset)
+{
+	uint32_t index_pos;
+
+	if (have_duplicate_entry(sha1, 0, &index_pos))
+		return 0;
+
+	create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);
+
+	display_progress(progress_state, to_pack.nr_objects);
+	return 1;
+}
+
 struct pbase_tree_cache {
 	unsigned char sha1[20];
 	int ref;
@@ -2085,6 +2157,10 @@ static int git_pack_config(const char *k, const char *v, void *cb)
 		cache_max_small_delta_size = git_config_int(k, v);
 		return 0;
 	}
+	if (!strcmp(k, "pack.usebitmaps")) {
+		use_bitmap_index = git_config_bool(k, v);
+		return 0;
+	}
 	if (!strcmp(k, "pack.threads")) {
 		delta_search_threads = git_config_int(k, v);
 		if (delta_search_threads < 0)
@@ -2293,6 +2369,29 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
 	}
 }

+static int get_object_list_from_bitmap(struct rev_info *revs)
+{
+	if (prepare_bitmap_walk(revs) < 0)
+		return -1;
+
+	if (!reuse_partial_packfile_from_bitmap(
+			&reuse_packfile,
+			&reuse_packfile_objects,
+			&reuse_packfile_offset)) {
+		assert(reuse_packfile_objects);
+		nr_result += reuse_packfile_objects;
+
+		if (progress) {
+			fprintf(stderr, "Reusing existing pack: %d, done.\n",
+				reuse_packfile_objects);
+			fflush(stderr);
+		}
+	}
+
+	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
+	return 0;
+}
+
 static void get_object_list(int ac, const char **av)
 {
 	struct rev_info revs;
@@ -2320,6 +2419,9 @@ static void get_object_list(int ac, const char **av)
 		die("bad revision '%s'", line);
 	}

+	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
+		return;
+
 	if (prepare_revision_walk(&revs))
 		die("revision walk setup failed");
 	mark_edges_uninteresting(&revs, show_edge);
@@ -2449,6 +2551,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 			 N_("pack compression level")),
 		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
 			    N_("do not hide commits by grafts"), 0),
+		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
+			 N_("use a bitmap index if available to speed up counting objects")),
 		OPT_END(),
 	};

@@ -2515,6 +2619,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 	if (keep_unreachable && unpack_unreachable)
 		die("--keep-unreachable and --unpack-unreachable are incompatible.");

+	if (!use_internal_rev_list || !pack_to_stdout || is_repository_shallow())
+		use_bitmap_index = 0;
+
 	if (progress && all_progress_implied)
 		progress = 2;
-- cgit v1.2.3

From 7cc8f9710857ed69d34c696330f7fd0367a5a29c Mon Sep 17 00:00:00 2001
From: Vicent Marti
Date: Sat, 21 Dec 2013 09:00:16 -0500
Subject: pack-objects: implement bitmap writing

This commit further extends the functionality of `pack-objects` by
allowing it to write out a `.bitmap` index next to any written packs,
together with the `.idx` index that currently gets written.

If bitmap writing is enabled for a given repository (either by calling
`pack-objects` with the `--write-bitmap-index` flag or by having
`pack.writebitmaps` set to `true` in the config) and pack-objects is
writing a packfile that would normally be indexed (i.e. not piping to
stdout), we will attempt to write the corresponding bitmap index for
the packfile.

Bitmap index writing happens after the packfile and its index have been
successfully written to disk (`finish_tmp_packfile`). The process is
performed in several steps:

  1. `bitmap_writer_set_checksum`: this call stores the partial
     checksum for the packfile being written; the checksum will be
     written in the resulting bitmap index to verify its integrity

  2. `bitmap_writer_build_type_index`: this call uses the array of
     `struct object_entry` that has just been sorted when writing out
     the actual packfile index to disk to generate 4 type-index
     bitmaps (one for each object type).

     These bitmaps have their nth bit set if the given object is of
     the bitmap's type. E.g. the nth bit of the Commits bitmap will be
     1 if the nth object in the packfile index is a commit.

     This is a very cheap operation because the bitmap writing code
     has access to the metadata stored in the `struct object_entry`
     array, and hence the real type for each object in the packfile.

  3. `bitmap_writer_reuse_bitmaps`: if there exists an existing bitmap
     index for one of the packfiles we're trying to repack, this call
     will efficiently rebuild the existing bitmaps so they can be
     reused on the new index. All the existing bitmaps will be stored
     in a `reuse` hash table, and the commit selection phase will
     prioritize these when selecting, as they can be written directly
     to the new index without having to perform a revision walk to
     fill the bitmap. This can greatly speed up the repack of a
     repository that already has bitmaps.

  4. `bitmap_writer_select_commits`: if bitmap writing is enabled for
     a given `pack-objects` run, the sequence of commits generated
     during the Counting Objects phase will be stored in an array. We
     then use that array to build up the list of selected commits.

     Writing a bitmap in the index for each object in the repository
     would be cost-prohibitive, so we use a simple heuristic to pick
     the commits that will be indexed with bitmaps.

     The current heuristics are a simplified version of JGit's
     original implementation. We select a higher density of commits
     depending on their age: the 100 most recent commits are always
     selected, after that we pick one commit out of every 100, and the
     gap increases as the commits grow older.

     On top of that, we make sure that every single branch that has
     not been merged (all the tips that would be required from a
     clone) gets its own bitmap, and when selecting commits between a
     gap, we tend to prioritize the commit with the most parents.

     Do note that there is no right/wrong way to perform commit
     selection; different selection algorithms will result in
     different commits being selected, but there's no such thing as
     "missing a commit". The bitmap walker algorithm implemented in
     `prepare_bitmap_walk` is able to adapt to missing bitmaps by
     performing manual walks that complete the bitmap: the ideal
     selection algorithm, however, would select the commits that are
     more likely to be used as roots for a walk in the future (e.g.
     the tips of each branch, and so on) to ensure a bitmap for them
     is always available.

  5. `bitmap_writer_build`: this is the computationally expensive part
     of bitmap generation. Based on the list of commits that were
     selected in the previous step, we perform several incremental
     walks to generate the bitmap for each commit.

     The walks begin from the oldest commit, and are built up
     incrementally for each branch. E.g. consider this DAG where A, B,
     C, D, E, F are the selected commits, and a, b, c, e are a chunk
     of simplified history that will not receive bitmaps.

            A---a---B--b--C--c--D
                     \
                      E--e--F

     We start by building the bitmap for A, using A as the root for a
     revision walk and marking all the objects that are reachable
     until the walk is over. Once this bitmap is stored, we reuse the
     bitmap walker to perform the walk for B, assuming that once we
     reach A again, the walk will be terminated because A has already
     been SEEN on the previous walk.
     This process is repeated for C, and D, but when we try to
     generate the bitmaps for E, we can reuse neither the current walk
     nor the bitmap we have generated so far.

     What we do now is reset the walk, clear the bitmap, and perform
     the walk from scratch using E as the origin. This new walk,
     however, does not need to be completed. Once we hit B, we can
     look up the bitmap we have already stored for that commit and OR
     it with the existing bitmap we've composed so far, allowing us to
     limit the walk early.

     After all the bitmaps have been generated, another iteration
     through the list of commits is performed to find the best XOR
     offsets for compression before writing them to disk. Because of
     the incremental nature of these bitmaps, XORing one of them with
     its predecessor results in a minimal "bitmap delta" most of the
     time. We can write this delta to the on-disk bitmap index, and
     then re-compose the original bitmaps by XORing them again when
     loaded.

     This is a phase very similar to pack-objects' `find_delta` (using
     bitmaps instead of objects, of course), except the heuristics
     have been greatly simplified: we only check the 10 bitmaps before
     any given one to find the best compressing one. This gives good
     results in practice, because there is locality in the ordering of
     the objects (and therefore bitmaps) in the packfile.

  6. `bitmap_writer_finish`: the last step in the process is
     serializing to disk all the bitmap data that has been generated
     in the two previous steps. The bitmap is written to a tmp file
     and then moved atomically to its final destination, using the
     same process as `pack-write.c:write_idx_file`.

Signed-off-by: Vicent Marti
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 builtin/pack-objects.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

(limited to 'builtin/pack-objects.c')

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 030d894083..fd6ae01ba4 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -63,6 +63,7 @@ static uint32_t reuse_packfile_objects;
 static off_t reuse_packfile_offset;

 static int use_bitmap_index = 1;
+static int write_bitmap_index;

 static unsigned long delta_cache_size = 0;
 static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
@@ -76,6 +77,24 @@ static unsigned long window_memory_limit = 0;
 static uint32_t written, written_delta;
 static uint32_t reused, reused_delta;

+/*
+ * Indexed commits
+ */
+static struct commit **indexed_commits;
+static unsigned int indexed_commits_nr;
+static unsigned int indexed_commits_alloc;
+
+static void index_commit_for_bitmap(struct commit *commit)
+{
+	if (indexed_commits_nr >= indexed_commits_alloc) {
+		indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
+		indexed_commits = xrealloc(indexed_commits,
+			indexed_commits_alloc * sizeof(struct commit *));
+	}
+
+	indexed_commits[indexed_commits_nr++] = commit;
+}
+
 static void *get_delta(struct object_entry *entry)
 {
 	unsigned long size, base_size, delta_size;
@@ -812,9 +831,30 @@ static void write_pack_file(void)
 		if (sizeof(tmpname) <= strlen(base_name) + 50)
 			die("pack base name '%s' too long", base_name);
 		snprintf(tmpname, sizeof(tmpname), "%s-", base_name);
+
+		if (write_bitmap_index) {
+			bitmap_writer_set_checksum(sha1);
+			bitmap_writer_build_type_index(written_list, nr_written);
+		}
+
 		finish_tmp_packfile(tmpname, pack_tmp_name,
 				    written_list, nr_written,
 				    &pack_idx_opts, sha1);
+
+		if (write_bitmap_index) {
+			char *end_of_name_prefix = strrchr(tmpname, 0);
+			sprintf(end_of_name_prefix, "%s.bitmap", sha1_to_hex(sha1));
+
+			stop_progress(&progress_state);
+
+			bitmap_writer_show_progress(progress);
+			bitmap_writer_reuse_bitmaps(&to_pack);
+			bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
+			bitmap_writer_build(&to_pack);
+			bitmap_writer_finish(written_list, nr_written, tmpname);
+			write_bitmap_index = 0;
+		}
+
 		free(pack_tmp_name);
 		puts(sha1_to_hex(sha1));
 	}
@@ -2157,6 +2197,10 @@ static int git_pack_config(const char *k, const char *v, void *cb)
 		cache_max_small_delta_size = git_config_int(k, v);
 		return 0;
 	}
+	if (!strcmp(k, "pack.writebitmaps")) {
+		write_bitmap_index = git_config_bool(k, v);
+		return 0;
+	}
 	if (!strcmp(k, "pack.usebitmaps")) {
 		use_bitmap_index = git_config_bool(k, v);
 		return 0;
@@ -2219,6 +2263,9 @@ static void show_commit(struct commit *commit, void *data)
 {
 	add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
 	commit->object.flags |= OBJECT_ADDED;
+
+	if (write_bitmap_index)
+		index_commit_for_bitmap(commit);
 }

 static void show_object(struct object *obj,
@@ -2411,6 +2458,7 @@ static void get_object_list(int ac, const char **av)
 		if (*line == '-') {
 			if (!strcmp(line, "--not")) {
 				flags ^= UNINTERESTING;
+				write_bitmap_index = 0;
 				continue;
 			}
 			die("not a rev '%s'", line);
@@ -2553,6 +2601,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 			     N_("do not hide commits by grafts"), 0),
 		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
 			 N_("use a bitmap index if available to speed up counting objects")),
+		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
+			 N_("write a bitmap index together with the pack index")),
 		OPT_END(),
 	};

@@ -2622,6 +2672,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 	if (!use_internal_rev_list || !pack_to_stdout || is_repository_shallow())
 		use_bitmap_index = 0;

+	if (pack_to_stdout || !rev_list_all)
+		write_bitmap_index = 0;
+
 	if (progress && all_progress_implied)
 		progress = 2;
-- cgit v1.2.3

From ae4f07fbccaab6dc93be52c0f34e137dd9fcbcf4 Mon Sep 17 00:00:00 2001
From: Vicent Marti
Date: Sat, 21 Dec 2013 09:00:45 -0500
Subject: pack-bitmap: implement optional name_hash cache

When we use pack bitmaps rather than walking the object graph, we end
up with the list of objects to include in the packfile, but we do not
know the path at which any tree or blob objects would be found.

In a recently packed repository, this is fine. A fetch would use the
paths only as a heuristic in the delta compression phase, and a fully
packed repository should not need to do much delta compression.

As time passes, though, we may acquire more objects on top of our large
bitmapped pack. If clients fetch frequently, then they never even look
at the bitmapped history, and all works as usual. However, a client who
has not fetched since the last bitmap repack will have "have" tips in
the bitmapped history, but "want" newer objects.

The bitmaps themselves degrade gracefully in this circumstance. We
manually walk the more recent bits of history, and then use bitmaps
when we hit them.

But we would also like to perform delta compression between the newer
objects and the bitmapped objects (both to delta against what we know
the user already has, but also between "new" and "old" objects that the
user is fetching). The lack of pathnames makes our delta heuristics
much less effective.

This patch adds an optional cache of the 32-bit name_hash values to the
end of the bitmap file.
If present, a reader can use it to match bitmapped and non-bitmapped
names during delta compression.

Here are perf results for p5310:

  Test                      origin/master       HEAD^                      HEAD
  -------------------------------------------------------------------------------------------------
  5310.2: repack to disk    36.81(37.82+1.43)   47.70(48.74+1.41) +29.6%   47.75(48.70+1.51) +29.7%
  5310.3: simulated clone   30.78(29.70+2.14)   1.08(0.97+0.10) -96.5%     1.07(0.94+0.12) -96.5%
  5310.4: simulated fetch   3.16(6.10+0.08)     3.54(10.65+0.06) +12.0%    1.70(3.07+0.06) -46.2%
  5310.6: partial bitmap    36.76(43.19+1.81)   6.71(11.25+0.76) -81.7%    4.08(6.26+0.46) -88.9%

You can see that the time spent on an incremental fetch goes down, as
our delta heuristics are able to do their work. And we save time on the
partial bitmap clone for the same reason.

Signed-off-by: Vicent Marti
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 builtin/pack-objects.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'builtin/pack-objects.c')

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index fd6ae01ba4..fd741970e6 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -64,6 +64,7 @@ static off_t reuse_packfile_offset;

 static int use_bitmap_index = 1;
 static int write_bitmap_index;
+static uint16_t write_bitmap_options;

 static unsigned long delta_cache_size = 0;
 static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
@@ -851,7 +852,8 @@ static void write_pack_file(void)
 			bitmap_writer_reuse_bitmaps(&to_pack);
 			bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
 			bitmap_writer_build(&to_pack);
-			bitmap_writer_finish(written_list, nr_written, tmpname);
+			bitmap_writer_finish(written_list, nr_written,
+					     tmpname, write_bitmap_options);
 			write_bitmap_index = 0;
 		}

@@ -2201,6 +2203,12 @@ static int git_pack_config(const char *k, const char *v, void *cb)
 		write_bitmap_index = git_config_bool(k, v);
 		return 0;
 	}
+	if (!strcmp(k, "pack.writebitmaphashcache")) {
+		if (git_config_bool(k, v))
+			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
+		else
+			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
+	}
 	if (!strcmp(k, "pack.usebitmaps")) {
 		use_bitmap_index = git_config_bool(k, v);
 		return 0;
-- cgit v1.2.3
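A closing note on this last patch: the 32-bit values it caches are the
ones computed by pack_name_hash(), the helper factored out of
builtin/pack-objects.c earlier in this series. The standalone sketch
below demonstrates the behavior; the hash body is copied from the diff
above, while the main() driver and its example paths are purely
illustrative:

    #include <ctype.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same algorithm as the name_hash()/pack_name_hash() shown above:
     * a sortable number built from the last sixteen non-whitespace
     * characters, where the last characters count "most". */
    static uint32_t pack_name_hash(const char *name)
    {
            uint32_t c, hash = 0;

            if (!name)
                    return 0;

            while ((c = *name++) != 0) {
                    if (isspace(c))
                            continue;
                    hash = (hash >> 2) + (c << 24);
            }
            return hash;
    }

    int main(void)
    {
            /* Paths sharing the same trailing characters produce nearby
             * hash values, so they remain good delta candidates even when
             * the on-disk paths of bitmapped objects are unknown. */
            printf("%08x\n", (unsigned)pack_name_hash("builtin/pack-objects.c"));
            printf("%08x\n", (unsigned)pack_name_hash("pack-objects.c"));
            return 0;
    }

Storing these values at the end of the .bitmap file is what lets a
later pack-objects run recover this delta heuristic for bitmapped
objects without knowing their paths.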