path: root/builtin
author     Junio C Hamano <gitster@pobox.com>    2021-03-24 14:36:27 -0700
committer  Junio C Hamano <gitster@pobox.com>    2021-03-24 14:36:27 -0700
commit     2744383cbda9bbbe4219bd3532757ae6d28460e1
tree       8ca08ee3555ef97487136e9e9de5699ca19d8990 /builtin
parent     Merge branch 'tb/push-simple-uses-branch-merge-config'
parent     builtin/pack-objects.c: ignore missing links with --stdin-packs
Merge branch 'tb/geometric-repack'
"git repack" so far has been only capable of repacking everything under the sun into a single pack (or split by size). A cleverer strategy to reduce the cost of repacking a repository has been introduced. * tb/geometric-repack: builtin/pack-objects.c: ignore missing links with --stdin-packs builtin/repack.c: reword comment around pack-objects flags builtin/repack.c: be more conservative with unsigned overflows builtin/repack.c: assign pack split later t7703: test --geometric repack with loose objects builtin/repack.c: do not repack single packs with --geometric builtin/repack.c: add '--geometric' option packfile: add kept-pack cache for find_kept_pack_entry() builtin/pack-objects.c: rewrite honor-pack-keep logic p5303: measure time to repack with keep p5303: add missing &&-chains builtin/pack-objects.c: add '--stdin-packs' option revision: learn '--no-kept-objects' packfile: introduce 'find_kept_pack_entry()'
Diffstat (limited to 'builtin')
-rw-r--r--  builtin/pack-objects.c  330
-rw-r--r--  builtin/repack.c        207
2 files changed, 480 insertions(+), 57 deletions(-)
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 4bb602688c..8319831514 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1188,7 +1188,8 @@ static int have_duplicate_entry(const struct object_id *oid,
return 1;
}
-static int want_found_object(int exclude, struct packed_git *p)
+static int want_found_object(const struct object_id *oid, int exclude,
+ struct packed_git *p)
{
if (exclude)
return 1;
@@ -1204,27 +1205,82 @@ static int want_found_object(int exclude, struct packed_git *p)
* make sure no copy of this object appears in _any_ pack that would make
* us omit the object, so we need to check all the packs.
*
- * We can however first check whether these options can possible matter;
+ * We can however first check whether these options can possibly matter;
* if they do not matter we know we want the object in generated pack.
* Otherwise, we signal "-1" at the end to tell the caller that we do
* not know either way, and it needs to check more packs.
*/
- if (!ignore_packed_keep_on_disk &&
- !ignore_packed_keep_in_core &&
- (!local || !have_non_local_packs))
- return 1;
+ /*
+ * Objects in packs borrowed from elsewhere are discarded regardless of
+ * whether they appear in other packs that weren't borrowed.
+ */
if (local && !p->pack_local)
return 0;
- if (p->pack_local &&
- ((ignore_packed_keep_on_disk && p->pack_keep) ||
- (ignore_packed_keep_in_core && p->pack_keep_in_core)))
- return 0;
+
+ /*
+ * Then handle .keep first, as we have a fast(er) path there.
+ */
+ if (ignore_packed_keep_on_disk || ignore_packed_keep_in_core) {
+ /*
+ * Set the flags for the kept-pack cache to be the ones we want
+ * to ignore.
+ *
+ * That is, if we are ignoring objects in on-disk keep packs,
+ * then we want to search through the on-disk keep and ignore
+ * the in-core ones.
+ */
+ unsigned flags = 0;
+ if (ignore_packed_keep_on_disk)
+ flags |= ON_DISK_KEEP_PACKS;
+ if (ignore_packed_keep_in_core)
+ flags |= IN_CORE_KEEP_PACKS;
+
+ if (ignore_packed_keep_on_disk && p->pack_keep)
+ return 0;
+ if (ignore_packed_keep_in_core && p->pack_keep_in_core)
+ return 0;
+ if (has_object_kept_pack(oid, flags))
+ return 0;
+ }
+
+ /*
+ * At this point we know definitively that either we don't care about
+ * keep-packs, or the object is not in one. Keep checking other
+ * conditions...
+ */
+ if (!local || !have_non_local_packs)
+ return 1;
/* we don't know yet; keep looking for more packs */
return -1;
}
+static int want_object_in_pack_one(struct packed_git *p,
+ const struct object_id *oid,
+ int exclude,
+ struct packed_git **found_pack,
+ off_t *found_offset)
+{
+ off_t offset;
+
+ if (p == *found_pack)
+ offset = *found_offset;
+ else
+ offset = find_pack_entry_one(oid->hash, p);
+
+ if (offset) {
+ if (!*found_pack) {
+ if (!is_pack_valid(p))
+ return -1;
+ *found_offset = offset;
+ *found_pack = p;
+ }
+ return want_found_object(oid, exclude, p);
+ }
+ return -1;
+}
+
/*
* Check whether we want the object in the pack (e.g., we do not want
* objects found in non-local stores if the "--local" option was used).
@@ -1252,7 +1308,7 @@ static int want_object_in_pack(const struct object_id *oid,
* are present we will determine the answer right now.
*/
if (*found_pack) {
- want = want_found_object(exclude, *found_pack);
+ want = want_found_object(oid, exclude, *found_pack);
if (want != -1)
return want;
}
@@ -1260,51 +1316,20 @@ static int want_object_in_pack(const struct object_id *oid,
for (m = get_multi_pack_index(the_repository); m; m = m->next) {
struct pack_entry e;
if (fill_midx_entry(the_repository, oid, &e, m)) {
- struct packed_git *p = e.p;
- off_t offset;
-
- if (p == *found_pack)
- offset = *found_offset;
- else
- offset = find_pack_entry_one(oid->hash, p);
-
- if (offset) {
- if (!*found_pack) {
- if (!is_pack_valid(p))
- continue;
- *found_offset = offset;
- *found_pack = p;
- }
- want = want_found_object(exclude, p);
- if (want != -1)
- return want;
- }
+ want = want_object_in_pack_one(e.p, oid, exclude, found_pack, found_offset);
+ if (want != -1)
+ return want;
}
}
list_for_each(pos, get_packed_git_mru(the_repository)) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
- off_t offset;
-
- if (p == *found_pack)
- offset = *found_offset;
- else
- offset = find_pack_entry_one(oid->hash, p);
-
- if (offset) {
- if (!*found_pack) {
- if (!is_pack_valid(p))
- continue;
- *found_offset = offset;
- *found_pack = p;
- }
- want = want_found_object(exclude, p);
- if (!exclude && want > 0)
- list_move(&p->mru,
- get_packed_git_mru(the_repository));
- if (want != -1)
- return want;
- }
+ want = want_object_in_pack_one(p, oid, exclude, found_pack, found_offset);
+ if (!exclude && want > 0)
+ list_move(&p->mru,
+ get_packed_git_mru(the_repository));
+ if (want != -1)
+ return want;
}
if (uri_protocols.nr) {
@@ -2986,6 +3011,191 @@ static int git_pack_config(const char *k, const char *v, void *cb)
return git_default_config(k, v, cb);
}
+/* Counters for trace2 output when in --stdin-packs mode. */
+static int stdin_packs_found_nr;
+static int stdin_packs_hints_nr;
+
+static int add_object_entry_from_pack(const struct object_id *oid,
+ struct packed_git *p,
+ uint32_t pos,
+ void *_data)
+{
+ struct rev_info *revs = _data;
+ struct object_info oi = OBJECT_INFO_INIT;
+ off_t ofs;
+ enum object_type type;
+
+ display_progress(progress_state, ++nr_seen);
+
+ if (have_duplicate_entry(oid, 0))
+ return 0;
+
+ ofs = nth_packed_object_offset(p, pos);
+ if (!want_object_in_pack(oid, 0, &p, &ofs))
+ return 0;
+
+ oi.typep = &type;
+ if (packed_object_info(the_repository, p, ofs, &oi) < 0)
+ die(_("could not get type of object %s in pack %s"),
+ oid_to_hex(oid), p->pack_name);
+ else if (type == OBJ_COMMIT) {
+ /*
+ * commits in included packs are used as starting points for the
+ * subsequent revision walk
+ */
+ add_pending_oid(revs, NULL, oid, 0);
+ }
+
+ stdin_packs_found_nr++;
+
+ create_object_entry(oid, type, 0, 0, 0, p, ofs);
+
+ return 0;
+}
+
+static void show_commit_pack_hint(struct commit *commit, void *_data)
+{
+ /* nothing to do; commits don't have a namehash */
+}
+
+static void show_object_pack_hint(struct object *object, const char *name,
+ void *_data)
+{
+ struct object_entry *oe = packlist_find(&to_pack, &object->oid);
+ if (!oe)
+ return;
+
+ /*
+ * Our 'to_pack' list was constructed by iterating all objects packed in
+ * included packs, and so doesn't have a non-zero hash field that you
+ * would typically pick up during a reachability traversal.
+ *
+ * Make a best-effort attempt to fill in the ->hash and ->no_try_delta
+ * here using the name now, in order to perhaps improve the delta
+ * selection process.
+ */
+ oe->hash = pack_name_hash(name);
+ oe->no_try_delta = name && no_try_delta(name);
+
+ stdin_packs_hints_nr++;
+}
+
+static int pack_mtime_cmp(const void *_a, const void *_b)
+{
+ struct packed_git *a = ((const struct string_list_item*)_a)->util;
+ struct packed_git *b = ((const struct string_list_item*)_b)->util;
+
+ /*
+ * order packs by descending mtime so that objects are laid out
+ * roughly as newest-to-oldest
+ */
+ if (a->mtime < b->mtime)
+ return 1;
+ else if (b->mtime < a->mtime)
+ return -1;
+ else
+ return 0;
+}
+
+static void read_packs_list_from_stdin(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list include_packs = STRING_LIST_INIT_DUP;
+ struct string_list exclude_packs = STRING_LIST_INIT_DUP;
+ struct string_list_item *item = NULL;
+
+ struct packed_git *p;
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ /*
+ * Use a revision walk to fill in the namehash of objects in the include
+ * packs. To save time, we'll avoid traversing through objects that are
+ * in excluded packs.
+ *
+ * That may cause us to avoid populating all of the namehash fields of
+ * all included objects, but our goal is best-effort, since this is only
+ * an optimization during delta selection.
+ */
+ revs.no_kept_objects = 1;
+ revs.keep_pack_cache_flags |= IN_CORE_KEEP_PACKS;
+ revs.blob_objects = 1;
+ revs.tree_objects = 1;
+ revs.tag_objects = 1;
+ revs.ignore_missing_links = 1;
+
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ if (!buf.len)
+ continue;
+
+ if (*buf.buf == '^')
+ string_list_append(&exclude_packs, buf.buf + 1);
+ else
+ string_list_append(&include_packs, buf.buf);
+
+ strbuf_reset(&buf);
+ }
+
+ string_list_sort(&include_packs);
+ string_list_sort(&exclude_packs);
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ const char *pack_name = pack_basename(p);
+
+ item = string_list_lookup(&include_packs, pack_name);
+ if (!item)
+ item = string_list_lookup(&exclude_packs, pack_name);
+
+ if (item)
+ item->util = p;
+ }
+
+ /*
+ * First handle all of the excluded packs, marking them as kept in-core
+ * so that later calls to add_object_entry() discard any objects that
+ * are also found in excluded packs.
+ */
+ for_each_string_list_item(item, &exclude_packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ p->pack_keep_in_core = 1;
+ }
+
+ /*
+ * Order packs by descending mtime (as pack_mtime_cmp() does); use QSORT
+ * directly to access the string_list_item's ->util pointer, which
+ * string_list_sort() does not provide.
+ */
+ QSORT(include_packs.items, include_packs.nr, pack_mtime_cmp);
+
+ for_each_string_list_item(item, &include_packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ for_each_object_in_pack(p,
+ add_object_entry_from_pack,
+ &revs,
+ FOR_EACH_OBJECT_PACK_ORDER);
+ }
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ traverse_commit_list(&revs,
+ show_commit_pack_hint,
+ show_object_pack_hint,
+ NULL);
+
+ trace2_data_intmax("pack-objects", the_repository, "stdin_packs_found",
+ stdin_packs_found_nr);
+ trace2_data_intmax("pack-objects", the_repository, "stdin_packs_hints",
+ stdin_packs_hints_nr);
+
+ strbuf_release(&buf);
+ string_list_clear(&include_packs, 0);
+ string_list_clear(&exclude_packs, 0);
+}
+
static void read_object_list_from_stdin(void)
{
char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
@@ -3489,6 +3699,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
struct strvec rp = STRVEC_INIT;
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
int rev_list_index = 0;
+ int stdin_packs = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
@@ -3539,6 +3750,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
N_("include objects referred to by the index"),
1, PARSE_OPT_NONEG),
+ OPT_BOOL(0, "stdin-packs", &stdin_packs,
+ N_("read packs from stdin")),
OPT_BOOL(0, "stdout", &pack_to_stdout,
N_("output pack to stdout")),
OPT_BOOL(0, "include-tag", &include_tag,
@@ -3645,7 +3858,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
use_internal_rev_list = 1;
strvec_push(&rp, "--indexed-objects");
}
- if (rev_list_unpacked) {
+ if (rev_list_unpacked && !stdin_packs) {
use_internal_rev_list = 1;
strvec_push(&rp, "--unpacked");
}
@@ -3690,8 +3903,13 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (filter_options.choice) {
if (!pack_to_stdout)
die(_("cannot use --filter without --stdout"));
+ if (stdin_packs)
+ die(_("cannot use --filter with --stdin-packs"));
}
+ if (stdin_packs && use_internal_rev_list)
+ die(_("cannot use internal rev list with --stdin-packs"));
+
/*
* "soft" reasons not to use bitmaps - for on-disk repack by default we want
*
@@ -3750,7 +3968,13 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (progress)
progress_state = start_progress(_("Enumerating objects"), 0);
- if (!use_internal_rev_list)
+ if (stdin_packs) {
+ /* avoids adding objects in excluded packs */
+ ignore_packed_keep_in_core = 1;
+ read_packs_list_from_stdin();
+ if (rev_list_unpacked)
+ add_unreachable_loose_objects();
+ } else if (!use_internal_rev_list)
read_object_list_from_stdin();
else {
get_object_list(rp.nr, rp.v);
diff --git a/builtin/repack.c b/builtin/repack.c
index 01440de2d5..6ce2556c9e 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -297,6 +297,142 @@ static void repack_promisor_objects(const struct pack_objects_args *args,
#define ALL_INTO_ONE 1
#define LOOSEN_UNREACHABLE 2
+struct pack_geometry {
+ struct packed_git **pack;
+ uint32_t pack_nr, pack_alloc;
+ uint32_t split;
+};
+
+static uint32_t geometry_pack_weight(struct packed_git *p)
+{
+ if (open_pack_index(p))
+ die(_("cannot open index for %s"), p->pack_name);
+ return p->num_objects;
+}
+
+static int geometry_cmp(const void *va, const void *vb)
+{
+ uint32_t aw = geometry_pack_weight(*(struct packed_git **)va),
+ bw = geometry_pack_weight(*(struct packed_git **)vb);
+
+ if (aw < bw)
+ return -1;
+ if (aw > bw)
+ return 1;
+ return 0;
+}
+
+static void init_pack_geometry(struct pack_geometry **geometry_p)
+{
+ struct packed_git *p;
+ struct pack_geometry *geometry;
+
+ *geometry_p = xcalloc(1, sizeof(struct pack_geometry));
+ geometry = *geometry_p;
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ if (!pack_kept_objects && p->pack_keep)
+ continue;
+
+ ALLOC_GROW(geometry->pack,
+ geometry->pack_nr + 1,
+ geometry->pack_alloc);
+
+ geometry->pack[geometry->pack_nr] = p;
+ geometry->pack_nr++;
+ }
+
+ QSORT(geometry->pack, geometry->pack_nr, geometry_cmp);
+}
+
+static void split_pack_geometry(struct pack_geometry *geometry, int factor)
+{
+ uint32_t i;
+ uint32_t split;
+ off_t total_size = 0;
+
+ if (!geometry->pack_nr) {
+ geometry->split = geometry->pack_nr;
+ return;
+ }
+
+ /*
+ * First, count the number of packs (in descending order of size) which
+ * already form a geometric progression.
+ */
+ for (i = geometry->pack_nr - 1; i > 0; i--) {
+ struct packed_git *ours = geometry->pack[i];
+ struct packed_git *prev = geometry->pack[i - 1];
+
+ if (unsigned_mult_overflows(factor, geometry_pack_weight(prev)))
+ die(_("pack %s too large to consider in geometric "
+ "progression"),
+ prev->pack_name);
+
+ if (geometry_pack_weight(ours) < factor * geometry_pack_weight(prev))
+ break;
+ }
+
+ split = i;
+
+ if (split) {
+ /*
+ * Move the split one to the right, since the top element in the
+ * last-compared pair can't be in the progression. Only do this
+ * when we split in the middle of the array (otherwise if we got
+ * to the end, then the split is in the right place).
+ */
+ split++;
+ }
+
+ /*
+ * Then, anything to the left of 'split' must be in a new pack. But,
+ * creating that new pack may cause packs in the heavy half to no longer
+ * form a geometric progression.
+ *
+ * Compute an expected size of the new pack, and then determine how many
+ * packs in the heavy half need to be joined into it (if any) to restore
+ * the geometric progression.
+ */
+ for (i = 0; i < split; i++) {
+ struct packed_git *p = geometry->pack[i];
+
+ if (unsigned_add_overflows(total_size, geometry_pack_weight(p)))
+ die(_("pack %s too large to roll up"), p->pack_name);
+ total_size += geometry_pack_weight(p);
+ }
+ for (i = split; i < geometry->pack_nr; i++) {
+ struct packed_git *ours = geometry->pack[i];
+
+ if (unsigned_mult_overflows(factor, total_size))
+ die(_("pack %s too large to roll up"), ours->pack_name);
+
+ if (geometry_pack_weight(ours) < factor * total_size) {
+ if (unsigned_add_overflows(total_size,
+ geometry_pack_weight(ours)))
+ die(_("pack %s too large to roll up"),
+ ours->pack_name);
+
+ split++;
+ total_size += geometry_pack_weight(ours);
+ } else
+ break;
+ }
+
+ geometry->split = split;
+}
+
+static void clear_pack_geometry(struct pack_geometry *geometry)
+{
+ if (!geometry)
+ return;
+
+ free(geometry->pack);
+ geometry->pack_nr = 0;
+ geometry->pack_alloc = 0;
+ geometry->split = 0;
+}
+
int cmd_repack(int argc, const char **argv, const char *prefix)
{
struct child_process cmd = CHILD_PROCESS_INIT;
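To make the split computation in split_pack_geometry() above concrete, here is a minimal standalone sketch of the same arithmetic. It is not part of the patch: the overflow die() calls are replaced by 64-bit intermediates, and the object counts are made up purely for illustration.

#include <inttypes.h>
#include <stdio.h>

/* Mirror of split_pack_geometry(): weights are object counts, sorted ascending. */
static uint32_t split_point(const uint32_t *weight, uint32_t nr, uint32_t factor)
{
        uint32_t i, split;
        uint64_t total_size = 0;

        if (!nr)
                return 0;

        /* count packs, largest first, that already form a geometric progression */
        for (i = nr - 1; i > 0; i--)
                if (weight[i] < (uint64_t)factor * weight[i - 1])
                        break;
        split = i;
        if (split)
                split++; /* the top of the failing pair cannot stay in the progression */

        /* grow the roll-up until the new pack itself fits under the progression */
        for (i = 0; i < split; i++)
                total_size += weight[i];
        for (i = split; i < nr; i++) {
                if (weight[i] >= (uint64_t)factor * total_size)
                        break;
                total_size += weight[i];
                split++;
        }
        return split;
}

int main(void)
{
        /* hypothetical packs, sorted by ascending object count */
        uint32_t weight[] = { 1, 1, 2, 4, 32, 64 };
        uint32_t nr = sizeof(weight) / sizeof(*weight);
        uint32_t split = split_point(weight, nr, 2);
        uint32_t i;

        for (i = 0; i < nr; i++)
                printf("%-7s pack with %" PRIu32 " objects\n",
                       i < split ? "roll up" : "keep", weight[i]);
        return 0;
}

With a factor of 2 and packs holding 1, 1, 2, 4, 32 and 64 objects, this rolls the four smallest packs into one new 8-object pack and leaves the 32- and 64-object packs untouched, which is exactly the layout split_pack_geometry() is after.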
@@ -304,6 +440,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
struct string_list names = STRING_LIST_INIT_DUP;
struct string_list rollback = STRING_LIST_INIT_NODUP;
struct string_list existing_packs = STRING_LIST_INIT_DUP;
+ struct pack_geometry *geometry = NULL;
struct strbuf line = STRBUF_INIT;
int i, ext, ret;
FILE *out;
@@ -316,6 +453,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
int no_update_server_info = 0;
struct pack_objects_args po_args = {NULL};
+ int geometric_factor = 0;
struct option builtin_repack_options[] = {
OPT_BIT('a', NULL, &pack_everything,
@@ -356,6 +494,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
N_("repack objects in packs marked with .keep")),
OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
N_("do not repack this pack")),
+ OPT_INTEGER('g', "geometric", &geometric_factor,
+ N_("find a geometric progression with factor <N>")),
OPT_END()
};
@@ -382,6 +522,13 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (write_bitmaps && !(pack_everything & ALL_INTO_ONE))
die(_(incremental_bitmap_conflict_error));
+ if (geometric_factor) {
+ if (pack_everything)
+ die(_("--geometric is incompatible with -A, -a"));
+ init_pack_geometry(&geometry);
+ split_pack_geometry(geometry, geometric_factor);
+ }
+
packdir = mkpathdup("%s/pack", get_object_directory());
packtmp = mkpathdup("%s/.tmp-%d-pack", packdir, (int)getpid());
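A short usage note (only the option itself comes from the OPT_INTEGER declaration above; the example invocation is illustrative rather than something this patch mandates): after this series,

    git repack --geometric=2

computes the split as shown here and rolls up only the packs that fall below it, and, per the check above, the option refuses to be combined with -a or -A.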
@@ -396,9 +543,21 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
strvec_pushf(&cmd.args, "--keep-pack=%s",
keep_pack_list.items[i].string);
strvec_push(&cmd.args, "--non-empty");
- strvec_push(&cmd.args, "--all");
- strvec_push(&cmd.args, "--reflog");
- strvec_push(&cmd.args, "--indexed-objects");
+ if (!geometry) {
+ /*
+ * We need to grab all reachable objects, including those that
+ * are reachable from reflogs and the index.
+ *
+ * When repacking into a geometric progression of packs,
+ * however, we ask 'git pack-objects --stdin-packs', and it is
+ * not about packing objects based on reachability but about
+ * repacking all the objects in specified packs and loose ones
+ * (indeed, --stdin-packs is incompatible with these options).
+ */
+ strvec_push(&cmd.args, "--all");
+ strvec_push(&cmd.args, "--reflog");
+ strvec_push(&cmd.args, "--indexed-objects");
+ }
if (has_promisor_remote())
strvec_push(&cmd.args, "--exclude-promisor-objects");
if (write_bitmaps > 0)
@@ -429,17 +588,37 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
strvec_push(&cmd.env_array, "GIT_REF_PARANOIA=1");
}
}
+ } else if (geometry) {
+ strvec_push(&cmd.args, "--stdin-packs");
+ strvec_push(&cmd.args, "--unpacked");
} else {
strvec_push(&cmd.args, "--unpacked");
strvec_push(&cmd.args, "--incremental");
}
- cmd.no_stdin = 1;
+ if (geometry)
+ cmd.in = -1;
+ else
+ cmd.no_stdin = 1;
ret = start_command(&cmd);
if (ret)
return ret;
+ if (geometry) {
+ FILE *in = xfdopen(cmd.in, "w");
+ /*
+ * The resulting pack should contain all objects in packs that
+ * are going to be rolled up, but exclude objects in packs which
+ * are being left alone.
+ */
+ for (i = 0; i < geometry->split; i++)
+ fprintf(in, "%s\n", pack_basename(geometry->pack[i]));
+ for (i = geometry->split; i < geometry->pack_nr; i++)
+ fprintf(in, "^%s\n", pack_basename(geometry->pack[i]));
+ fclose(in);
+ }
+
out = xfdopen(cmd.out, "r");
while (strbuf_getline_lf(&line, out) != EOF) {
if (line.len != the_hash_algo->hexsz)
@@ -507,6 +686,25 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (!string_list_has_string(&names, sha1))
remove_redundant_pack(packdir, item->string);
}
+
+ if (geometry) {
+ struct strbuf buf = STRBUF_INIT;
+
+ uint32_t i;
+ for (i = 0; i < geometry->split; i++) {
+ struct packed_git *p = geometry->pack[i];
+ if (string_list_has_string(&names,
+ hash_to_hex(p->hash)))
+ continue;
+
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, pack_basename(p));
+ strbuf_strip_suffix(&buf, ".pack");
+
+ remove_redundant_pack(packdir, buf.buf);
+ }
+ strbuf_release(&buf);
+ }
if (!po_args.quiet && isatty(2))
opts |= PRUNE_PACKED_VERBOSE;
prune_packed_objects(opts);
@@ -528,6 +726,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
string_list_clear(&names, 0);
string_list_clear(&rollback, 0);
string_list_clear(&existing_packs, 0);
+ clear_pack_geometry(geometry);
strbuf_release(&line);
return 0;