Diffstat (limited to 'builtin/pack-objects.c')
 builtin/pack-objects.c | 190 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 140 insertions(+), 50 deletions(-)
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 53307d53b0..0954375be9 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -25,8 +25,8 @@
#include "argv-array.h"
static const char *pack_usage[] = {
- N_("git pack-objects --stdout [options...] [< ref-list | < object-list]"),
- N_("git pack-objects [options...] base-name [< ref-list | < object-list]"),
+ N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
+ N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
NULL
};
@@ -44,7 +44,9 @@ static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static unsigned long unpack_unreachable_expiration;
+static int pack_loose_unreachable;
static int local;
+static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
@@ -341,15 +343,15 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_reuse_object(struct sha1file *f, struct object_entry *entry,
- unsigned long limit, int usable_delta)
+static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+ unsigned long limit, int usable_delta)
{
struct packed_git *p = entry->in_pack;
struct pack_window *w_curs = NULL;
struct revindex_entry *revidx;
off_t offset;
enum object_type type = entry->type;
- unsigned long datalen;
+ off_t datalen;
unsigned char header[10], dheader[10];
unsigned hdrlen;
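
[Annotation, not part of the patch] This hunk and the two that follow widen the entry-size bookkeeping in the write path from unsigned long to off_t. As an illustration of the truncation hazard being avoided -- assuming a build where unsigned long is 32 bits while off_t (with large-file support) is 64 bits:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	/* hypothetical 5 GiB pack entry; assumes a 64-bit off_t */
	off_t entry_size = (off_t)5 * 1024 * 1024 * 1024;
	unsigned long as_ulong = (unsigned long)entry_size;

	/* on a 32-bit unsigned long this wraps to 1 GiB, so any
	 * pack-size-limit comparison done on it would be wrong */
	printf("off_t size:         %lld bytes\n", (long long)entry_size);
	printf("unsigned long size: %lu bytes\n", as_ulong);
	return 0;
}
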
@@ -415,11 +417,12 @@ static unsigned long write_reuse_object(struct sha1file *f, struct object_entry
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_object(struct sha1file *f,
- struct object_entry *entry,
- off_t write_offset)
+static off_t write_object(struct sha1file *f,
+ struct object_entry *entry,
+ off_t write_offset)
{
- unsigned long limit, len;
+ unsigned long limit;
+ off_t len;
int usable_delta, to_reuse;
if (!pack_to_stdout)
@@ -491,7 +494,7 @@ static enum write_one_status write_one(struct sha1file *f,
struct object_entry *e,
off_t *offset)
{
- unsigned long size;
+ off_t size;
int recursing;
/*
@@ -624,7 +627,7 @@ static struct object_entry **compute_write_order(void)
{
unsigned int i, wo_end, last_untagged;
- struct object_entry **wo = xmalloc(to_pack.nr_objects * sizeof(*wo));
+ struct object_entry **wo;
struct object_entry *objects = to_pack.objects;
for (i = 0; i < to_pack.nr_objects; i++) {
@@ -657,6 +660,7 @@ static struct object_entry **compute_write_order(void)
* Give the objects in the original recency order until
* we see a tagged tip.
*/
+ ALLOC_ARRAY(wo, to_pack.nr_objects);
for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
if (objects[i].tagged)
break;
@@ -758,6 +762,10 @@ static off_t write_reused_pack(struct sha1file *f)
return reuse_packfile_offset - sizeof(struct pack_header);
}
+static const char no_split_warning[] = N_(
+"disabling bitmap writing, packs are split due to pack.packSizeLimit"
+);
+
static void write_pack_file(void)
{
uint32_t i = 0, j;
@@ -769,7 +777,7 @@ static void write_pack_file(void)
if (progress > pack_to_stdout)
progress_state = start_progress(_("Writing objects"), nr_result);
- written_list = xmalloc(to_pack.nr_objects * sizeof(*written_list));
+ ALLOC_ARRAY(written_list, to_pack.nr_objects);
write_order = compute_write_order();
do {
@@ -812,7 +820,10 @@ static void write_pack_file(void)
fixup_pack_header_footer(fd, sha1, pack_tmp_name,
nr_written, sha1, offset);
close(fd);
- write_bitmap_index = 0;
+ if (write_bitmap_index) {
+ warning(_(no_split_warning));
+ write_bitmap_index = 0;
+ }
}
if (!pack_to_stdout) {
@@ -827,8 +838,7 @@ static void write_pack_file(void)
* to preserve this property.
*/
if (stat(pack_tmp_name, &st) < 0) {
- warning("failed to stat %s: %s",
- pack_tmp_name, strerror(errno));
+ warning_errno("failed to stat %s", pack_tmp_name);
} else if (!last_mtime) {
last_mtime = st.st_mtime;
} else {
@@ -836,8 +846,7 @@ static void write_pack_file(void)
utb.actime = st.st_atime;
utb.modtime = --last_mtime;
if (utime(pack_tmp_name, &utb) < 0)
- warning("failed utime() on %s: %s",
- pack_tmp_name, strerror(errno));
+ warning_errno("failed utime() on %s", pack_tmp_name);
}
strbuf_addf(&tmpname, "%s-", base_name);
@@ -970,6 +979,23 @@ static int want_object_in_pack(const unsigned char *sha1,
return 1;
if (incremental)
return 0;
+
+ /*
+ * When asked to do --local (do not include an
+ * object that appears in a pack we borrow
+ * from elsewhere) or --honor-pack-keep (do not
+ * include an object that appears in a pack marked
+ * with .keep), we need to make sure no copy of this
+ * object appears in _any_ pack that would cause us to
+ * omit it, so we need to complete this loop. When
+ * neither option is in effect, we know the object
+ * we just found is going to be packed, so break
+ * out of the loop to return 1 now.
+ */
+ if (!ignore_packed_keep &&
+ (!local || !have_non_local_packs))
+ break;
+
if (local && !p->pack_local)
return 0;
if (ignore_packed_keep && p->pack_local && p->pack_keep)
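
[Annotation, not part of the patch] The new early break is the heart of this hunk: once a copy of the object has been found, the remaining packs only need to be scanned if a later pack could still force the object to be omitted. Condensed into a hypothetical helper (illustration only), the condition reads:

/*
 * Return 1 if later packs must still be examined, 0 if the copy we
 * already found is guaranteed to be packed (so the loop can stop).
 */
static int must_scan_remaining_packs(int local, int ignore_packed_keep,
				     int have_non_local_packs)
{
	if (ignore_packed_keep)
		return 1;  /* a later .keep pack could exclude the object */
	if (local && have_non_local_packs)
		return 1;  /* a later non-local pack could exclude the object */
	return 0;          /* neither option can change the answer any more */
}
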
@@ -1185,7 +1211,7 @@ static void add_pbase_object(struct tree_desc *tree,
if (cmp < 0)
return;
if (name[cmplen] != '/') {
- add_object_entry(entry.sha1,
+ add_object_entry(entry.oid->hash,
object_type(entry.mode),
fullname, 1);
return;
@@ -1196,7 +1222,7 @@ static void add_pbase_object(struct tree_desc *tree,
const char *down = name+cmplen+1;
int downlen = name_cmp_len(down);
- tree = pbase_tree_get(entry.sha1);
+ tree = pbase_tree_get(entry.oid->hash);
if (!tree)
return;
init_tree_desc(&sub, tree->tree_data, tree->tree_size);
@@ -2097,6 +2123,35 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size,
#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)
#endif
+static void add_tag_chain(const struct object_id *oid)
+{
+ struct tag *tag;
+
+ /*
+ * We catch duplicates already in add_object_entry(), but we'd
+ * prefer to do this extra check to avoid having to parse the
+ * tag at all if we already know that it's being packed (e.g., if
+ * it was included via bitmaps, we would not have parsed it
+ * previously).
+ */
+ if (packlist_find(&to_pack, oid->hash, NULL))
+ return;
+
+ tag = lookup_tag(oid->hash);
+ while (1) {
+ if (!tag || parse_tag(tag) || !tag->tagged)
+ die("unable to pack objects reachable from tag %s",
+ oid_to_hex(oid));
+
+ add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL, 0);
+
+ if (tag->tagged->type != OBJ_TAG)
+ return;
+
+ tag = (struct tag *)tag->tagged;
+ }
+}
+
static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
struct object_id peeled;
@@ -2104,7 +2159,7 @@ static int add_ref_tag(const char *path, const struct object_id *oid, int flag,
if (starts_with(path, "refs/tags/") && /* is a tag? */
!peel_ref(path, peeled.hash) && /* peelable? */
packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */
- add_object_entry(oid->hash, OBJ_TAG, NULL, 0);
+ add_tag_chain(oid);
return 0;
}
@@ -2129,7 +2184,7 @@ static void prepare_pack(int window, int depth)
if (!to_pack.nr_objects || !window || !depth)
return;
- delta_list = xmalloc(to_pack.nr_objects * sizeof(*delta_list));
+ ALLOC_ARRAY(delta_list, to_pack.nr_objects);
nr_deltas = n = 0;
for (i = 0; i < to_pack.nr_objects; i++) {
@@ -2277,7 +2332,7 @@ static void read_object_list_from_stdin(void)
static void show_commit(struct commit *commit, void *data)
{
- add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
+ add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL, 0);
commit->object.flags |= OBJECT_ADDED;
if (write_bitmap_index)
@@ -2287,13 +2342,13 @@ static void show_commit(struct commit *commit, void *data)
static void show_object(struct object *obj, const char *name, void *data)
{
add_preferred_base_object(name);
- add_object_entry(obj->sha1, obj->type, name, 0);
+ add_object_entry(obj->oid.hash, obj->type, name, 0);
obj->flags |= OBJECT_ADDED;
}
static void show_edge(struct commit *commit)
{
- add_preferred_base(commit->object.sha1);
+ add_preferred_base(commit->object.oid.hash);
}
struct in_pack_object {
@@ -2309,7 +2364,7 @@ struct in_pack {
static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
- in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->sha1, p);
+ in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
in_pack->array[in_pack->nr].object = object;
in_pack->nr++;
}
@@ -2328,7 +2383,7 @@ static int ofscmp(const void *a_, const void *b_)
else if (a->offset > b->offset)
return 1;
else
- return hashcmp(a->object->sha1, b->object->sha1);
+ return oidcmp(&a->object->oid, &b->object->oid);
}
static void add_objects_in_unpacked_packs(struct rev_info *revs)
@@ -2366,12 +2421,38 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
ofscmp);
for (i = 0; i < in_pack.nr; i++) {
struct object *o = in_pack.array[i].object;
- add_object_entry(o->sha1, o->type, "", 0);
+ add_object_entry(o->oid.hash, o->type, "", 0);
}
}
free(in_pack.array);
}
+static int add_loose_object(const unsigned char *sha1, const char *path,
+ void *data)
+{
+ enum object_type type = sha1_object_info(sha1, NULL);
+
+ if (type < 0) {
+ warning("loose object at %s could not be examined", path);
+ return 0;
+ }
+
+ add_object_entry(sha1, type, "", 0);
+ return 0;
+}
+
+/*
+ * We actually don't even have to worry about reachability here.
+ * add_object_entry will weed out duplicates, so we just add every
+ * loose object we find.
+ */
+static void add_unreachable_loose_objects(void)
+{
+ for_each_loose_file_in_objdir(get_object_directory(),
+ add_loose_object,
+ NULL, NULL, NULL);
+}
+
static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
{
static struct packed_git *last_found = (void *)1;
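
[Annotation, not part of the patch] add_unreachable_loose_objects() leans on for_each_loose_file_in_objdir() and on add_object_entry()'s duplicate suppression, so no reachability analysis is needed. For readers unfamiliar with the on-disk layout being walked, here is a rough standalone sketch (assumed SHA-1 layout, not git's helper): loose objects live under the object directory as <2 hex chars>/<38 hex chars>.

#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* print the names of loose objects found under objdir, e.g. ".git/objects" */
static void list_loose_objects(const char *objdir)
{
	DIR *top = opendir(objdir);
	struct dirent *fan;

	if (!top)
		return;
	while ((fan = readdir(top)) != NULL) {
		char sub[4096];
		DIR *d;
		struct dirent *obj;

		if (strlen(fan->d_name) != 2)
			continue;	/* skips ".", "..", "pack", "info" */
		snprintf(sub, sizeof(sub), "%s/%s", objdir, fan->d_name);
		d = opendir(sub);
		if (!d)
			continue;
		while ((obj = readdir(d)) != NULL)
			if (strlen(obj->d_name) == 38)	/* 2 + 38 hex digits */
				printf("%s%s\n", fan->d_name, obj->d_name);
		closedir(d);
	}
	closedir(top);
}
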
@@ -2473,12 +2554,12 @@ static void record_recent_object(struct object *obj,
const char *name,
void *data)
{
- sha1_array_append(&recent_objects, obj->sha1);
+ sha1_array_append(&recent_objects, obj->oid.hash);
}
static void record_recent_commit(struct commit *commit, void *data)
{
- sha1_array_append(&recent_objects, commit->object.sha1);
+ sha1_array_append(&recent_objects, commit->object.oid.hash);
}
static void get_object_list(int ac, const char **av)
@@ -2541,6 +2622,8 @@ static void get_object_list(int ac, const char **av)
if (keep_unreachable)
add_objects_in_unpacked_packs(&revs);
+ if (pack_loose_unreachable)
+ add_unreachable_loose_objects();
if (unpack_unreachable)
loosen_unused_packed_objects(&revs);
@@ -2577,23 +2660,6 @@ static int option_parse_unpack_unreachable(const struct option *opt,
return 0;
}
-static int option_parse_ulong(const struct option *opt,
- const char *arg, int unset)
-{
- if (unset)
- die(_("option %s does not accept negative form"),
- opt->long_name);
-
- if (!git_parse_ulong(arg, opt->value))
- die(_("unable to parse value '%s' for option %s"),
- arg, opt->long_name);
- return 0;
-}
-
-#define OPT_ULONG(s, l, v, h) \
- { OPTION_CALLBACK, (s), (l), (v), "n", (h), \
- PARSE_OPT_NONEG, option_parse_ulong }
-
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
@@ -2616,16 +2682,16 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
N_("write the pack index file in the specified idx format version"),
0, option_parse_index_version },
- OPT_ULONG(0, "max-pack-size", &pack_size_limit,
- N_("maximum size of each output pack file")),
+ OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
+ N_("maximum size of each output pack file")),
OPT_BOOL(0, "local", &local,
N_("ignore borrowed objects from alternate object store")),
OPT_BOOL(0, "incremental", &incremental,
N_("ignore packed objects")),
OPT_INTEGER(0, "window", &window,
N_("limit pack window by objects")),
- OPT_ULONG(0, "window-memory", &window_memory_limit,
- N_("limit pack window by memory in addition to object limit")),
+ OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
+ N_("limit pack window by memory in addition to object limit")),
OPT_INTEGER(0, "depth", &depth,
N_("maximum length of delta chain allowed in the resulting pack")),
OPT_BOOL(0, "reuse-delta", &reuse_delta,
@@ -2658,6 +2724,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
N_("include tag objects that refer to objects to be packed")),
OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
N_("keep unreachable objects")),
+ OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
+ N_("pack loose unreachable objects")),
{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
N_("unpack unreachable objects newer than <time>"),
PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
@@ -2763,6 +2831,28 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
progress = 2;
prepare_packed_git();
+ if (ignore_packed_keep) {
+ struct packed_git *p;
+ for (p = packed_git; p; p = p->next)
+ if (p->pack_local && p->pack_keep)
+ break;
+ if (!p) /* no keep-able packs found */
+ ignore_packed_keep = 0;
+ }
+ if (local) {
+ /*
+ * unlike ignore_packed_keep above, we do not want to
+ * unset "local" based on looking at packs, as it
+ * also covers non-local objects
+ */
+ struct packed_git *p;
+ for (p = packed_git; p; p = p->next) {
+ if (!p->pack_local) {
+ have_non_local_packs = 1;
+ break;
+ }
+ }
+ }
if (progress)
progress_state = start_progress(_("Counting objects"), 0);
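
[Annotation, not part of the patch] The two loops added to cmd_pack_objects() precompute, once per run, whether any pack could still cause want_object_in_pack() to reject an object: ignore_packed_keep is dropped entirely when no local .keep pack exists, and have_non_local_packs records whether --local has anything left to worry about. A hypothetical condensed version of that precomputation (illustration only):

/* made-up minimal pack descriptor, mirroring the fields consulted above */
struct toy_pack {
	struct toy_pack *next;
	int pack_local;
	int pack_keep;
};

/*
 * Scan the pack list once so the per-object loop can consult two
 * cached answers instead of re-walking every pack.
 */
static void precompute_pack_flags(const struct toy_pack *packs,
				  int *any_local_keep, int *any_non_local)
{
	const struct toy_pack *p;

	*any_local_keep = *any_non_local = 0;
	for (p = packs; p; p = p->next) {
		if (p->pack_local && p->pack_keep)
			*any_local_keep = 1;
		if (!p->pack_local)
			*any_non_local = 1;
	}
}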