Diffstat (limited to 'builtin/pack-objects.c')
 -rw-r--r--  builtin/pack-objects.c  1501
 1 file changed, 1042 insertions(+), 459 deletions(-)
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index c3a75166bd..d1144a8f7e 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1,5 +1,7 @@
#include "builtin.h"
#include "cache.h"
+#include "repository.h"
+#include "config.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
@@ -14,6 +16,8 @@
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
+#include "list-objects-filter.h"
+#include "list-objects-filter-options.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
@@ -23,10 +27,26 @@
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
+#include "list.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "dir.h"
+
+#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
+#define SIZE(obj) oe_size(&to_pack, obj)
+#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
+#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
+#define DELTA(obj) oe_delta(&to_pack, obj)
+#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
+#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
+#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
+#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
+#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
static const char *pack_usage[] = {
- N_("git pack-objects --stdout [options...] [< ref-list | < object-list]"),
- N_("git pack-objects [options...] base-name [< ref-list | < object-list]"),
+ N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
+ N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
NULL
};
@@ -38,15 +58,18 @@ static const char *pack_usage[] = {
static struct packing_data to_pack;
static struct pack_idx_entry **written_list;
-static uint32_t nr_result, nr_written;
+static uint32_t nr_result, nr_written, nr_seen;
static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
-static unsigned long unpack_unreachable_expiration;
+static timestamp_t unpack_unreachable_expiration;
+static int pack_loose_unreachable;
static int local;
+static int have_non_local_packs;
static int incremental;
-static int ignore_packed_keep;
+static int ignore_packed_keep_on_disk;
+static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
@@ -58,23 +81,34 @@ static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;
-static int pack_compression_level = Z_DEFAULT_COMPRESSION;
-static int pack_compression_seen;
static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static off_t reuse_packfile_offset;
-static int use_bitmap_index = 1;
+static int use_bitmap_index_default = 1;
+static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;
+static int exclude_promisor_objects;
+
static unsigned long delta_cache_size = 0;
-static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;
static unsigned long window_memory_limit = 0;
+static struct list_objects_filter_options filter_options;
+
+enum missing_action {
+ MA_ERROR = 0, /* fail if any missing objects are encountered */
+ MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
+};
+static enum missing_action arg_missing_action;
+static show_object_fn fn_show_object;
+
/*
* stats
*/
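The missing_action enum above selects how the object traversal treats objects that are absent from the local store, and fn_show_object is pointed either at the file's plain show_object() callback or at a filtering wrapper (the wrappers live further down in this patch, outside the hunks quoted here). A minimal sketch of what such a wrapper can look like, assuming the usual show_object() callback and has_object_file() helper from this file's context:

static void show_object__ma_allow_any(struct object *obj,
				      const char *name, void *data)
{
	assert(arg_missing_action == MA_ALLOW_ANY);

	/* Quietly skip any missing object instead of dying on it. */
	if (!has_object_file(&obj->oid))
		return;

	show_object(obj, name, data);
}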
@@ -104,16 +138,23 @@ static void *get_delta(struct object_entry *entry)
void *buf, *base_buf, *delta_buf;
enum object_type type;
- buf = read_sha1_file(entry->idx.sha1, &type, &size);
+ buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
- die("unable to read %s", sha1_to_hex(entry->idx.sha1));
- base_buf = read_sha1_file(entry->delta->idx.sha1, &type, &base_size);
+ die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
+ base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
+ &base_size);
if (!base_buf)
- die("unable to read %s", sha1_to_hex(entry->delta->idx.sha1));
+ die("unable to read %s",
+ oid_to_hex(&DELTA(entry)->idx.oid));
delta_buf = diff_delta(base_buf, base_size,
buf, size, &delta_size, 0);
- if (!delta_buf || delta_size != entry->delta_size)
- die("delta size changed");
+ /*
+	 * We successfully computed this delta once but dropped it for
+ * memory reasons. Something is very wrong if this time we
+ * recompute and create a different delta.
+ */
+ if (!delta_buf || delta_size != DELTA_SIZE(entry))
+ BUG("delta size changed");
free(buf);
free(base_buf);
return delta_buf;
@@ -144,8 +185,8 @@ static unsigned long do_compress(void **pptr, unsigned long size)
return stream.total_out;
}
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
- const unsigned char *sha1)
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
+ const struct object_id *oid)
{
git_zstream stream;
unsigned char ibuf[1024 * 16];
@@ -159,7 +200,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi
int zret = Z_OK;
readlen = read_istream(st, ibuf, sizeof(ibuf));
if (readlen == -1)
- die(_("unable to read %s"), sha1_to_hex(sha1));
+ die(_("unable to read %s"), oid_to_hex(oid));
stream.next_in = ibuf;
stream.avail_in = readlen;
@@ -168,7 +209,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi
stream.next_out = obuf;
stream.avail_out = sizeof(obuf);
zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
- sha1write(f, obuf, stream.next_out - obuf);
+ hashwrite(f, obuf, stream.next_out - obuf);
olen += stream.next_out - obuf;
}
if (stream.avail_in)
@@ -213,7 +254,7 @@ static int check_pack_inflate(struct packed_git *p,
stream.total_in == len) ? 0 : -1;
}
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
struct packed_git *p,
struct pack_window **w_curs,
off_t offset,
@@ -226,50 +267,52 @@ static void copy_pack_data(struct sha1file *f,
in = use_pack(p, w_curs, offset, &avail);
if (avail > len)
avail = (unsigned long)len;
- sha1write(f, in, avail);
+ hashwrite(f, in, avail);
offset += avail;
len -= avail;
}
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
unsigned long size, datalen;
- unsigned char header[10], dheader[10];
+ unsigned char header[MAX_PACK_OBJECT_HEADER],
+ dheader[MAX_PACK_OBJECT_HEADER];
unsigned hdrlen;
enum object_type type;
void *buf;
struct git_istream *st = NULL;
+ const unsigned hashsz = the_hash_algo->rawsz;
if (!usable_delta) {
- if (entry->type == OBJ_BLOB &&
- entry->size > big_file_threshold &&
- (st = open_istream(entry->idx.sha1, &type, &size, NULL)) != NULL)
+ if (oe_type(entry) == OBJ_BLOB &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
+ (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
- buf = read_sha1_file(entry->idx.sha1, &type, &size);
+ buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
- die(_("unable to read %s"), sha1_to_hex(entry->idx.sha1));
+ die(_("unable to read %s"),
+ oid_to_hex(&entry->idx.oid));
}
/*
* make sure no cached delta data remains from a
* previous attempt before a pack split occurred.
*/
- free(entry->delta_data);
- entry->delta_data = NULL;
+ FREE_AND_NULL(entry->delta_data);
entry->z_delta_size = 0;
} else if (entry->delta_data) {
- size = entry->delta_size;
+ size = DELTA_SIZE(entry);
buf = entry->delta_data;
entry->delta_data = NULL;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
} else {
buf = get_delta(entry);
- size = entry->delta_size;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ size = DELTA_SIZE(entry);
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
}
@@ -284,7 +327,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
* The object header is a byte of 'type' followed by zero or
* more bytes of length.
*/
- hdrlen = encode_in_pack_object_header(type, size, header);
+ hdrlen = encode_in_pack_object_header(header, sizeof(header),
+ type, size);
if (type == OBJ_OFS_DELTA) {
/*
@@ -292,48 +336,48 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
* encoding of the relative offset for the delta
* base from this object's position in the pack.
*/
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
} else if (type == OBJ_REF_DELTA) {
/*
* Deltas with a base reference contain
- * an additional 20 bytes for the base sha1.
+ * additional bytes for the base object ID.
*/
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.sha1, 20);
- hdrlen += 20;
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
if (st) {
- datalen = write_large_blob_data(st, f, entry->idx.sha1);
+ datalen = write_large_blob_data(st, f, &entry->idx.oid);
close_istream(st);
} else {
- sha1write(f, buf, datalen);
+ hashwrite(f, buf, datalen);
free(buf);
}
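The dheader[] loops above (and again in write_reuse_object() below) store the OFS_DELTA base distance as a base-128 varint filled from the end of the buffer, with the extra "minus one per continuation byte" twist that keeps the encoding minimal. A self-contained sketch of the same scheme, plus the matching decode that check_object() performs later in this file; the names here are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Encode ofs into the tail of buf; returns the index of the first byte used. */
static unsigned encode_ofs(uint64_t ofs, unsigned char *buf, unsigned bufsz)
{
	unsigned pos = bufsz - 1;

	buf[pos] = ofs & 127;
	while (ofs >>= 7)
		buf[--pos] = 128 | (--ofs & 127);
	return pos;
}

/* Decode a distance previously produced by encode_ofs(). */
static uint64_t decode_ofs(const unsigned char *buf)
{
	unsigned char c = *buf++;
	uint64_t ofs = c & 127;

	while (c & 128) {
		c = *buf++;
		ofs = ((ofs + 1) << 7) + (c & 127);
	}
	return ofs;
}

int main(void)
{
	unsigned char buf[10];
	unsigned pos = encode_ofs(300, buf, sizeof(buf));

	/* 300 encodes as 0x81 0x2c and decodes back to 300. */
	printf("%u byte(s), decoded %llu\n",
	       (unsigned)(sizeof(buf) - pos),
	       (unsigned long long)decode_ofs(buf + pos));
	return 0;
}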
@@ -341,29 +385,34 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_reuse_object(struct sha1file *f, struct object_entry *entry,
- unsigned long limit, int usable_delta)
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
+ unsigned long limit, int usable_delta)
{
- struct packed_git *p = entry->in_pack;
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
struct revindex_entry *revidx;
off_t offset;
- enum object_type type = entry->type;
- unsigned long datalen;
- unsigned char header[10], dheader[10];
+ enum object_type type = oe_type(entry);
+ off_t datalen;
+ unsigned char header[MAX_PACK_OBJECT_HEADER],
+ dheader[MAX_PACK_OBJECT_HEADER];
unsigned hdrlen;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ unsigned long entry_size = SIZE(entry);
- if (entry->delta)
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ if (DELTA(entry))
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
- hdrlen = encode_in_pack_object_header(type, entry->size, header);
+ hdrlen = encode_in_pack_object_header(header, sizeof(header),
+ type, entry_size);
offset = entry->in_pack_offset;
revidx = find_pack_revindex(p, offset);
datalen = revidx[1].offset - offset;
if (!pack_to_stdout && p->index_version > 1 &&
check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
- error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
+ error(_("bad packed object CRC for %s"),
+ oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
return write_no_reuse_object(f, entry, limit, usable_delta);
}
@@ -372,41 +421,42 @@ static unsigned long write_reuse_object(struct sha1file *f, struct object_entry
datalen -= entry->in_pack_header_size;
if (!pack_to_stdout && p->index_version == 1 &&
- check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
- error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
+ check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
+ error(_("corrupt packed object for %s"),
+ oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
return write_no_reuse_object(f, entry, limit, usable_delta);
}
if (type == OBJ_OFS_DELTA) {
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.sha1, 20);
- hdrlen += 20;
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
reused_delta++;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
copy_pack_data(f, p, &w_curs, offset, datalen);
unuse_pack(&w_curs);
@@ -415,11 +465,12 @@ static unsigned long write_reuse_object(struct sha1file *f, struct object_entry
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_object(struct sha1file *f,
- struct object_entry *entry,
- off_t write_offset)
+static off_t write_object(struct hashfile *f,
+ struct object_entry *entry,
+ off_t write_offset)
{
- unsigned long limit, len;
+ unsigned long limit;
+ off_t len;
int usable_delta, to_reuse;
if (!pack_to_stdout)
@@ -437,28 +488,29 @@ static unsigned long write_object(struct sha1file *f,
else
limit = pack_size_limit - write_offset;
- if (!entry->delta)
+ if (!DELTA(entry))
usable_delta = 0; /* no delta */
else if (!pack_size_limit)
usable_delta = 1; /* unlimited packfile */
- else if (entry->delta->idx.offset == (off_t)-1)
+ else if (DELTA(entry)->idx.offset == (off_t)-1)
usable_delta = 0; /* base was written to another pack */
- else if (entry->delta->idx.offset)
+ else if (DELTA(entry)->idx.offset)
usable_delta = 1; /* base already exists in this pack */
else
usable_delta = 0; /* base could end up in another pack */
if (!reuse_object)
to_reuse = 0; /* explicit */
- else if (!entry->in_pack)
+ else if (!IN_PACK(entry))
to_reuse = 0; /* can't reuse what we don't have */
- else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
+ else if (oe_type(entry) == OBJ_REF_DELTA ||
+ oe_type(entry) == OBJ_OFS_DELTA)
/* check_object() decided it for us ... */
to_reuse = usable_delta;
/* ... but pack split may override that */
- else if (entry->type != entry->in_pack_type)
+ else if (oe_type(entry) != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
- else if (entry->delta)
+ else if (DELTA(entry))
to_reuse = 0; /* we want to pack afresh */
else
to_reuse = 1; /* we have it in-pack undeltified,
@@ -487,11 +539,11 @@ enum write_one_status {
WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
struct object_entry *e,
off_t *offset)
{
- unsigned long size;
+ off_t size;
int recursing;
/*
@@ -501,8 +553,8 @@ static enum write_one_status write_one(struct sha1file *f,
*/
recursing = (e->idx.offset == 1);
if (recursing) {
- warning("recursive delta detected for object %s",
- sha1_to_hex(e->idx.sha1));
+ warning(_("recursive delta detected for object %s"),
+ oid_to_hex(&e->idx.oid));
return WRITE_ONE_RECURSIVE;
} else if (e->idx.offset || e->preferred_base) {
/* offset is non zero if object is written already. */
@@ -510,12 +562,12 @@ static enum write_one_status write_one(struct sha1file *f,
}
/* if we are deltified, write out base object first. */
- if (e->delta) {
+ if (DELTA(e)) {
e->idx.offset = 1; /* now recurse */
- switch (write_one(f, e->delta, offset)) {
+ switch (write_one(f, DELTA(e), offset)) {
case WRITE_ONE_RECURSIVE:
/* we cannot depend on this one */
- e->delta = NULL;
+ SET_DELTA(e, NULL);
break;
default:
break;
@@ -535,21 +587,21 @@ static enum write_one_status write_one(struct sha1file *f,
/* make sure off_t is sufficiently large not to wrap */
if (signed_add_overflows(*offset, size))
- die("pack too large for current definition of off_t");
+ die(_("pack too large for current definition of off_t"));
*offset += size;
return WRITE_ONE_WRITTEN;
}
-static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
+static int mark_tagged(const char *path, const struct object_id *oid, int flag,
void *cb_data)
{
- unsigned char peeled[20];
- struct object_entry *entry = packlist_find(&to_pack, sha1, NULL);
+ struct object_id peeled;
+ struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);
if (entry)
entry->tagged = 1;
- if (!peel_ref(path, peeled)) {
- entry = packlist_find(&to_pack, peeled, NULL);
+ if (!peel_ref(path, &peeled)) {
+ entry = packlist_find(&to_pack, peeled.hash, NULL);
if (entry)
entry->tagged = 1;
}
@@ -577,34 +629,34 @@ static void add_descendants_to_write_order(struct object_entry **wo,
/* add this node... */
add_to_write_order(wo, endp, e);
/* all its siblings... */
- for (s = e->delta_sibling; s; s = s->delta_sibling) {
+ for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
add_to_write_order(wo, endp, s);
}
}
/* drop down a level to add left subtree nodes if possible */
- if (e->delta_child) {
+ if (DELTA_CHILD(e)) {
add_to_order = 1;
- e = e->delta_child;
+ e = DELTA_CHILD(e);
} else {
add_to_order = 0;
/* our sibling might have some children, it is next */
- if (e->delta_sibling) {
- e = e->delta_sibling;
+ if (DELTA_SIBLING(e)) {
+ e = DELTA_SIBLING(e);
continue;
}
/* go back to our parent node */
- e = e->delta;
- while (e && !e->delta_sibling) {
+ e = DELTA(e);
+ while (e && !DELTA_SIBLING(e)) {
/* we're on the right side of a subtree, keep
* going up until we can go right again */
- e = e->delta;
+ e = DELTA(e);
}
if (!e) {
/* done- we hit our original root node */
return;
}
/* pass it off to sibling at this level */
- e = e->delta_sibling;
+ e = DELTA_SIBLING(e);
}
};
}
@@ -615,7 +667,7 @@ static void add_family_to_write_order(struct object_entry **wo,
{
struct object_entry *root;
- for (root = e; root->delta; root = root->delta)
+ for (root = e; DELTA(root); root = DELTA(root))
; /* nothing */
add_descendants_to_write_order(wo, endp, root);
}
@@ -624,14 +676,14 @@ static struct object_entry **compute_write_order(void)
{
unsigned int i, wo_end, last_untagged;
- struct object_entry **wo = xmalloc(to_pack.nr_objects * sizeof(*wo));
+ struct object_entry **wo;
struct object_entry *objects = to_pack.objects;
for (i = 0; i < to_pack.nr_objects; i++) {
objects[i].tagged = 0;
objects[i].filled = 0;
- objects[i].delta_child = NULL;
- objects[i].delta_sibling = NULL;
+ SET_DELTA_CHILD(&objects[i], NULL);
+ SET_DELTA_SIBLING(&objects[i], NULL);
}
/*
@@ -641,11 +693,11 @@ static struct object_entry **compute_write_order(void)
*/
for (i = to_pack.nr_objects; i > 0;) {
struct object_entry *e = &objects[--i];
- if (!e->delta)
+ if (!DELTA(e))
continue;
/* Mark me as the first child */
- e->delta_sibling = e->delta->delta_child;
- e->delta->delta_child = e;
+ e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+ SET_DELTA_CHILD(DELTA(e), e);
}
/*
@@ -657,6 +709,7 @@ static struct object_entry **compute_write_order(void)
* Give the objects in the original recency order until
* we see a tagged tip.
*/
+ ALLOC_ARRAY(wo, to_pack.nr_objects);
for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
if (objects[i].tagged)
break;
@@ -676,8 +729,8 @@ static struct object_entry **compute_write_order(void)
* And then all remaining commits and tags.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_COMMIT &&
- objects[i].type != OBJ_TAG)
+ if (oe_type(&objects[i]) != OBJ_COMMIT &&
+ oe_type(&objects[i]) != OBJ_TAG)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
@@ -686,7 +739,7 @@ static struct object_entry **compute_write_order(void)
* And then all the trees.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_TREE)
+ if (oe_type(&objects[i]) != OBJ_TREE)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
@@ -700,30 +753,31 @@ static struct object_entry **compute_write_order(void)
}
if (wo_end != to_pack.nr_objects)
- die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);
+ die(_("ordered %u objects, expected %"PRIu32),
+ wo_end, to_pack.nr_objects);
return wo;
}
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
{
unsigned char buffer[8192];
off_t to_write, total;
int fd;
if (!is_pack_valid(reuse_packfile))
- die("packfile is invalid: %s", reuse_packfile->pack_name);
+ die(_("packfile is invalid: %s"), reuse_packfile->pack_name);
- fd = git_open_noatime(reuse_packfile->pack_name);
+ fd = git_open(reuse_packfile->pack_name);
if (fd < 0)
- die_errno("unable to open packfile for reuse: %s",
+ die_errno(_("unable to open packfile for reuse: %s"),
reuse_packfile->pack_name);
if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
- die_errno("unable to seek in reused packfile");
+ die_errno(_("unable to seek in reused packfile"));
if (reuse_packfile_offset < 0)
- reuse_packfile_offset = reuse_packfile->pack_size - 20;
+ reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
total = to_write = reuse_packfile_offset - sizeof(struct pack_header);
@@ -731,12 +785,12 @@ static off_t write_reused_pack(struct sha1file *f)
int read_pack = xread(fd, buffer, sizeof(buffer));
if (read_pack <= 0)
- die_errno("unable to read from reused packfile");
+ die_errno(_("unable to read from reused packfile"));
if (read_pack > to_write)
read_pack = to_write;
- sha1write(f, buffer, read_pack);
+ hashwrite(f, buffer, read_pack);
to_write -= read_pack;
/*
@@ -758,10 +812,14 @@ static off_t write_reused_pack(struct sha1file *f)
return reuse_packfile_offset - sizeof(struct pack_header);
}
+static const char no_split_warning[] = N_(
+"disabling bitmap writing, packs are split due to pack.packSizeLimit"
+);
+
static void write_pack_file(void)
{
uint32_t i = 0, j;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
@@ -769,15 +827,15 @@ static void write_pack_file(void)
if (progress > pack_to_stdout)
progress_state = start_progress(_("Writing objects"), nr_result);
- written_list = xmalloc(to_pack.nr_objects * sizeof(*written_list));
+ ALLOC_ARRAY(written_list, to_pack.nr_objects);
write_order = compute_write_order();
do {
- unsigned char sha1[20];
+ struct object_id oid;
char *pack_tmp_name = NULL;
if (pack_to_stdout)
- f = sha1fd_throughput(1, "<stdout>", progress_state);
+ f = hashfd_throughput(1, "<stdout>", progress_state);
else
f = create_tmp_packfile(&pack_tmp_name);
@@ -804,15 +862,18 @@ static void write_pack_file(void)
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- sha1close(f, sha1, CSUM_CLOSE);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- sha1close(f, sha1, CSUM_FSYNC);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = sha1close(f, sha1, 0);
- fixup_pack_header_footer(fd, sha1, pack_tmp_name,
- nr_written, sha1, offset);
+ int fd = finalize_hashfile(f, oid.hash, 0);
+ fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
+ nr_written, oid.hash, offset);
close(fd);
- write_bitmap_index = 0;
+ if (write_bitmap_index) {
+ warning(_(no_split_warning));
+ write_bitmap_index = 0;
+ }
}
if (!pack_to_stdout) {
@@ -827,8 +888,7 @@ static void write_pack_file(void)
* to preserve this property.
*/
if (stat(pack_tmp_name, &st) < 0) {
- warning("failed to stat %s: %s",
- pack_tmp_name, strerror(errno));
+ warning_errno(_("failed to stat %s"), pack_tmp_name);
} else if (!last_mtime) {
last_mtime = st.st_mtime;
} else {
@@ -836,23 +896,23 @@ static void write_pack_file(void)
utb.actime = st.st_atime;
utb.modtime = --last_mtime;
if (utime(pack_tmp_name, &utb) < 0)
- warning("failed utime() on %s: %s",
- pack_tmp_name, strerror(errno));
+ warning_errno(_("failed utime() on %s"), pack_tmp_name);
}
strbuf_addf(&tmpname, "%s-", base_name);
if (write_bitmap_index) {
- bitmap_writer_set_checksum(sha1);
- bitmap_writer_build_type_index(written_list, nr_written);
+ bitmap_writer_set_checksum(oid.hash);
+ bitmap_writer_build_type_index(
+ &to_pack, written_list, nr_written);
}
finish_tmp_packfile(&tmpname, pack_tmp_name,
written_list, nr_written,
- &pack_idx_opts, sha1);
+ &pack_idx_opts, oid.hash);
if (write_bitmap_index) {
- strbuf_addf(&tmpname, "%s.bitmap", sha1_to_hex(sha1));
+ strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid));
stop_progress(&progress_state);
@@ -867,7 +927,7 @@ static void write_pack_file(void)
strbuf_release(&tmpname);
free(pack_tmp_name);
- puts(sha1_to_hex(sha1));
+ puts(oid_to_hex(&oid));
}
/* mark written objects as written to previous pack */
@@ -881,28 +941,19 @@ static void write_pack_file(void)
free(write_order);
stop_progress(&progress_state);
if (written != nr_result)
- die("wrote %"PRIu32" objects while expecting %"PRIu32,
- written, nr_result);
-}
-
-static void setup_delta_attr_check(struct git_attr_check *check)
-{
- static struct git_attr *attr_delta;
-
- if (!attr_delta)
- attr_delta = git_attr("delta");
-
- check[0].attr = attr_delta;
+ die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
+ written, nr_result);
}
static int no_try_delta(const char *path)
{
- struct git_attr_check check[1];
+ static struct attr_check *check;
- setup_delta_attr_check(check);
- if (git_check_attr(path, ARRAY_SIZE(check), check))
+ if (!check)
+ check = attr_check_initl("delta", NULL);
+ if (git_check_attr(&the_index, path, check))
return 0;
- if (ATTR_FALSE(check->value))
+ if (ATTR_FALSE(check->items[0].value))
return 1;
return 0;
}
@@ -917,13 +968,13 @@ static int no_try_delta(const char *path)
* found the item, since that saves us from having to look it up again a
* few lines later when we want to add the new entry.
*/
-static int have_duplicate_entry(const unsigned char *sha1,
+static int have_duplicate_entry(const struct object_id *oid,
int exclude,
uint32_t *index_pos)
{
struct object_entry *entry;
- entry = packlist_find(&to_pack, sha1, index_pos);
+ entry = packlist_find(&to_pack, oid->hash, index_pos);
if (!entry)
return 0;
@@ -936,53 +987,102 @@ static int have_duplicate_entry(const unsigned char *sha1,
return 1;
}
+static int want_found_object(int exclude, struct packed_git *p)
+{
+ if (exclude)
+ return 1;
+ if (incremental)
+ return 0;
+
+ /*
+ * When asked to do --local (do not include an object that appears in a
+ * pack we borrow from elsewhere) or --honor-pack-keep (do not include
+ * an object that appears in a pack marked with .keep), finding a pack
+ * that matches the criteria is sufficient for us to decide to omit it.
+ * However, even if this pack does not satisfy the criteria, we need to
+ * make sure no copy of this object appears in _any_ pack that makes us
+ * to omit the object, so we need to check all the packs.
+ *
+ * We can however first check whether these options can possible matter;
+ * if they do not matter we know we want the object in generated pack.
+ * Otherwise, we signal "-1" at the end to tell the caller that we do
+ * not know either way, and it needs to check more packs.
+ */
+ if (!ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
+ (!local || !have_non_local_packs))
+ return 1;
+
+ if (local && !p->pack_local)
+ return 0;
+ if (p->pack_local &&
+ ((ignore_packed_keep_on_disk && p->pack_keep) ||
+ (ignore_packed_keep_in_core && p->pack_keep_in_core)))
+ return 0;
+
+ /* we don't know yet; keep looking for more packs */
+ return -1;
+}
+
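For reference, the tri-state contract of want_found_object() boils down to the following table (a restatement of the checks above, not additional behavior):

/*
 *   exclude (a preferred base)                         ->  1  keep it
 *   --incremental                                      ->  0  drop it (already packed)
 *   neither kind of .keep is being honored, and either
 *   --local is off or every pack is local              ->  1  keep it
 *   --local and this pack is not local                 ->  0  drop it
 *   this local pack has an honored .keep
 *   (on disk or in-core)                               ->  0  drop it
 *   anything else                                      -> -1  undecided; the caller keeps
 *                                                            scanning the remaining packs
 */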
/*
* Check whether we want the object in the pack (e.g., we do not want
* objects found in non-local stores if the "--local" option was used).
*
- * As a side effect of this check, we will find the packed version of this
- * object, if any. We therefore pass out the pack information to avoid having
- * to look it up again later.
+ * If the caller already knows an existing pack it wants to take the object
+ * from, that is passed in *found_pack and *found_offset; otherwise this
+ * function finds if there is any pack that has the object and returns the pack
+ * and its offset in these variables.
*/
-static int want_object_in_pack(const unsigned char *sha1,
+static int want_object_in_pack(const struct object_id *oid,
int exclude,
struct packed_git **found_pack,
off_t *found_offset)
{
- struct packed_git *p;
+ int want;
+ struct list_head *pos;
- if (!exclude && local && has_loose_object_nonlocal(sha1))
+ if (!exclude && local && has_loose_object_nonlocal(oid))
return 0;
- *found_pack = NULL;
- *found_offset = 0;
+ /*
+ * If we already know the pack object lives in, start checks from that
+ * pack - in the usual case when neither --local was given nor .keep files
+ * are present we will determine the answer right now.
+ */
+ if (*found_pack) {
+ want = want_found_object(exclude, *found_pack);
+ if (want != -1)
+ return want;
+ }
+ list_for_each(pos, get_packed_git_mru(the_repository)) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
+ off_t offset;
+
+ if (p == *found_pack)
+ offset = *found_offset;
+ else
+ offset = find_pack_entry_one(oid->hash, p);
- for (p = packed_git; p; p = p->next) {
- off_t offset = find_pack_entry_one(sha1, p);
if (offset) {
if (!*found_pack) {
- if (!is_pack_valid(p)) {
- warning("packfile %s cannot be accessed", p->pack_name);
+ if (!is_pack_valid(p))
continue;
- }
*found_offset = offset;
*found_pack = p;
}
- if (exclude)
- return 1;
- if (incremental)
- return 0;
- if (local && !p->pack_local)
- return 0;
- if (ignore_packed_keep && p->pack_local && p->pack_keep)
- return 0;
+ want = want_found_object(exclude, p);
+ if (!exclude && want > 0)
+ list_move(&p->mru,
+ get_packed_git_mru(the_repository));
+ if (want != -1)
+ return want;
}
}
return 1;
}
-static void create_object_entry(const unsigned char *sha1,
+static void create_object_entry(const struct object_id *oid,
enum object_type type,
uint32_t hash,
int exclude,
@@ -993,16 +1093,15 @@ static void create_object_entry(const unsigned char *sha1,
{
struct object_entry *entry;
- entry = packlist_alloc(&to_pack, sha1, index_pos);
+ entry = packlist_alloc(&to_pack, oid->hash, index_pos);
entry->hash = hash;
- if (type)
- entry->type = type;
+ oe_set_type(entry, type);
if (exclude)
entry->preferred_base = 1;
else
nr_result++;
if (found_pack) {
- entry->in_pack = found_pack;
+ oe_set_in_pack(&to_pack, entry, found_pack);
entry->in_pack_offset = found_offset;
}
@@ -1013,17 +1112,19 @@ static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);
-static int add_object_entry(const unsigned char *sha1, enum object_type type,
+static int add_object_entry(const struct object_id *oid, enum object_type type,
const char *name, int exclude)
{
- struct packed_git *found_pack;
- off_t found_offset;
+ struct packed_git *found_pack = NULL;
+ off_t found_offset = 0;
uint32_t index_pos;
- if (have_duplicate_entry(sha1, exclude, &index_pos))
+ display_progress(progress_state, ++nr_seen);
+
+ if (have_duplicate_entry(oid, exclude, &index_pos))
return 0;
- if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {
+ if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {
/* The pack is missing an object, so it will not have closure */
if (write_bitmap_index) {
warning(_(no_closure_warning));
@@ -1032,32 +1133,33 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
return 0;
}
- create_object_entry(sha1, type, pack_name_hash(name),
+ create_object_entry(oid, type, pack_name_hash(name),
exclude, name && no_try_delta(name),
index_pos, found_pack, found_offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
-static int add_object_entry_from_bitmap(const unsigned char *sha1,
+static int add_object_entry_from_bitmap(const struct object_id *oid,
enum object_type type,
int flags, uint32_t name_hash,
struct packed_git *pack, off_t offset)
{
uint32_t index_pos;
- if (have_duplicate_entry(sha1, 0, &index_pos))
+ display_progress(progress_state, ++nr_seen);
+
+ if (have_duplicate_entry(oid, 0, &index_pos))
return 0;
- create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);
+ if (!want_object_in_pack(oid, 0, &pack, &offset))
+ return 0;
- display_progress(progress_state, nr_result);
+ create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
return 1;
}
struct pbase_tree_cache {
- unsigned char sha1[20];
+ struct object_id oid;
int ref;
int temporary;
void *tree_data;
@@ -1065,9 +1167,9 @@ struct pbase_tree_cache {
};
static struct pbase_tree_cache *(pbase_tree_cache[256]);
-static int pbase_tree_cache_ix(const unsigned char *sha1)
+static int pbase_tree_cache_ix(const struct object_id *oid)
{
- return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
+ return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
@@ -1084,14 +1186,14 @@ static struct pbase_tree {
struct pbase_tree_cache pcache;
} *pbase_tree;
-static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
+static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
{
struct pbase_tree_cache *ent, *nent;
void *data;
unsigned long size;
enum object_type type;
int neigh;
- int my_ix = pbase_tree_cache_ix(sha1);
+ int my_ix = pbase_tree_cache_ix(oid);
int available_ix = -1;
/* pbase-tree-cache acts as a limited hashtable.
@@ -1100,7 +1202,7 @@ static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
*/
for (neigh = 0; neigh < 8; neigh++) {
ent = pbase_tree_cache[my_ix];
- if (ent && !hashcmp(ent->sha1, sha1)) {
+ if (ent && !oidcmp(&ent->oid, oid)) {
ent->ref++;
return ent;
}
@@ -1116,7 +1218,7 @@ static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
/* Did not find one. Either we got a bogus request or
* we need to read and perhaps cache.
*/
- data = read_sha1_file(sha1, &type, &size);
+ data = read_object_file(oid, &type, &size);
if (!data)
return NULL;
if (type != OBJ_TREE) {
@@ -1142,7 +1244,7 @@ static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
free(ent->tree_data);
nent = ent;
}
- hashcpy(nent->sha1, sha1);
+ oidcpy(&nent->oid, oid);
nent->tree_data = data;
nent->tree_size = size;
nent->ref = 1;
@@ -1187,7 +1289,7 @@ static void add_pbase_object(struct tree_desc *tree,
if (cmp < 0)
return;
if (name[cmplen] != '/') {
- add_object_entry(entry.sha1,
+ add_object_entry(entry.oid,
object_type(entry.mode),
fullname, 1);
return;
@@ -1198,7 +1300,7 @@ static void add_pbase_object(struct tree_desc *tree,
const char *down = name+cmplen+1;
int downlen = name_cmp_len(down);
- tree = pbase_tree_get(entry.sha1);
+ tree = pbase_tree_get(entry.oid);
if (!tree)
return;
init_tree_desc(&sub, tree->tree_data, tree->tree_size);
@@ -1217,7 +1319,7 @@ static int done_pbase_path_pos(unsigned hash)
int lo = 0;
int hi = done_pbase_paths_num;
while (lo < hi) {
- int mi = (hi + lo) / 2;
+ int mi = lo + (hi - lo) / 2;
if (done_pbase_paths[mi] == hash)
return mi;
if (done_pbase_paths[mi] < hash)
@@ -1230,7 +1332,7 @@ static int done_pbase_path_pos(unsigned hash)
static int check_pbase_path(unsigned hash)
{
- int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
+ int pos = done_pbase_path_pos(hash);
if (0 <= pos)
return 1;
pos = -pos - 1;
@@ -1239,9 +1341,8 @@ static int check_pbase_path(unsigned hash)
done_pbase_paths_alloc);
done_pbase_paths_num++;
if (pos < done_pbase_paths_num)
- memmove(done_pbase_paths + pos + 1,
- done_pbase_paths + pos,
- (done_pbase_paths_num - pos - 1) * sizeof(unsigned));
+ MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
+ done_pbase_paths_num - pos - 1);
done_pbase_paths[pos] = hash;
return 0;
}
@@ -1258,7 +1359,7 @@ static void add_preferred_base_object(const char *name)
cmplen = name_cmp_len(name);
for (it = pbase_tree; it; it = it->next) {
if (cmplen == 0) {
- add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
+ add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);
}
else {
struct tree_desc tree;
@@ -1268,22 +1369,22 @@ static void add_preferred_base_object(const char *name)
}
}
-static void add_preferred_base(unsigned char *sha1)
+static void add_preferred_base(struct object_id *oid)
{
struct pbase_tree *it;
void *data;
unsigned long size;
- unsigned char tree_sha1[20];
+ struct object_id tree_oid;
if (window <= num_preferred_base++)
return;
- data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
+ data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
if (!data)
return;
for (it = pbase_tree; it; it = it->next) {
- if (!hashcmp(it->pcache.sha1, tree_sha1)) {
+ if (!oidcmp(&it->pcache.oid, &tree_oid)) {
free(data);
return;
}
@@ -1293,7 +1394,7 @@ static void add_preferred_base(unsigned char *sha1)
it->next = pbase_tree;
pbase_tree = it;
- hashcpy(it->pcache.sha1, tree_sha1);
+ oidcpy(&it->pcache.oid, &tree_oid);
it->pcache.tree_data = data;
it->pcache.tree_size = size;
}
@@ -1306,29 +1407,29 @@ static void cleanup_preferred_base(void)
it = pbase_tree;
pbase_tree = NULL;
while (it) {
- struct pbase_tree *this = it;
- it = this->next;
- free(this->pcache.tree_data);
- free(this);
+ struct pbase_tree *tmp = it;
+ it = tmp->next;
+ free(tmp->pcache.tree_data);
+ free(tmp);
}
for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
if (!pbase_tree_cache[i])
continue;
free(pbase_tree_cache[i]->tree_data);
- free(pbase_tree_cache[i]);
- pbase_tree_cache[i] = NULL;
+ FREE_AND_NULL(pbase_tree_cache[i]);
}
- free(done_pbase_paths);
- done_pbase_paths = NULL;
+ FREE_AND_NULL(done_pbase_paths);
done_pbase_paths_num = done_pbase_paths_alloc = 0;
}
static void check_object(struct object_entry *entry)
{
- if (entry->in_pack) {
- struct packed_git *p = entry->in_pack;
+ unsigned long canonical_size;
+
+ if (IN_PACK(entry)) {
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
const unsigned char *base_ref = NULL;
struct object_entry *base_entry;
@@ -1336,6 +1437,8 @@ static void check_object(struct object_entry *entry)
unsigned long avail;
off_t ofs;
unsigned char *buf, c;
+ enum object_type type;
+ unsigned long in_pack_size;
buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
@@ -1344,11 +1447,15 @@ static void check_object(struct object_entry *entry)
* since non-delta representations could still be reused.
*/
used = unpack_object_header_buffer(buf, avail,
- &entry->in_pack_type,
- &entry->size);
+ &type,
+ &in_pack_size);
if (used == 0)
goto give_up;
+ if (type < 0)
+ BUG("invalid type %d", type);
+ entry->in_pack_type = type;
+
/*
* Determine if this is a delta and if so whether we can
* reuse it or not. Otherwise let's find out as cheaply as
@@ -1357,9 +1464,10 @@ static void check_object(struct object_entry *entry)
switch (entry->in_pack_type) {
default:
/* Not a delta hence we've already got all we need. */
- entry->type = entry->in_pack_type;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size);
entry->in_pack_header_size = used;
- if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
+ if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
goto give_up;
unuse_pack(&w_curs);
return;
@@ -1367,7 +1475,7 @@ static void check_object(struct object_entry *entry)
if (reuse_delta && !entry->preferred_base)
base_ref = use_pack(p, &w_curs,
entry->in_pack_offset + used, NULL);
- entry->in_pack_header_size = used + 20;
+ entry->in_pack_header_size = used + the_hash_algo->rawsz;
break;
case OBJ_OFS_DELTA:
buf = use_pack(p, &w_curs,
@@ -1378,8 +1486,8 @@ static void check_object(struct object_entry *entry)
while (c & 128) {
ofs += 1;
if (!ofs || MSB(ofs, 7)) {
- error("delta base offset overflow in pack for %s",
- sha1_to_hex(entry->idx.sha1));
+ error(_("delta base offset overflow in pack for %s"),
+ oid_to_hex(&entry->idx.oid));
goto give_up;
}
c = buf[used_0++];
@@ -1387,8 +1495,8 @@ static void check_object(struct object_entry *entry)
}
ofs = entry->in_pack_offset - ofs;
if (ofs <= 0 || ofs >= entry->in_pack_offset) {
- error("delta base offset out of bound for %s",
- sha1_to_hex(entry->idx.sha1));
+ error(_("delta base offset out of bound for %s"),
+ oid_to_hex(&entry->idx.oid));
goto give_up;
}
if (reuse_delta && !entry->preferred_base) {
@@ -1413,25 +1521,29 @@ static void check_object(struct object_entry *entry)
* deltify other objects against, in order to avoid
* circular deltas.
*/
- entry->type = entry->in_pack_type;
- entry->delta = base_entry;
- entry->delta_size = entry->size;
- entry->delta_sibling = base_entry->delta_child;
- base_entry->delta_child = entry;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size); /* delta size */
+ SET_DELTA(entry, base_entry);
+ SET_DELTA_SIZE(entry, in_pack_size);
+ entry->delta_sibling_idx = base_entry->delta_child_idx;
+ SET_DELTA_CHILD(base_entry, entry);
unuse_pack(&w_curs);
return;
}
- if (entry->type) {
+ if (oe_type(entry)) {
+ off_t delta_pos;
+
/*
* This must be a delta and we already know what the
* final object type is. Let's extract the actual
* object size from the delta header.
*/
- entry->size = get_size_from_delta(p, &w_curs,
- entry->in_pack_offset + entry->in_pack_header_size);
- if (entry->size == 0)
+ delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
+ canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
+ if (canonical_size == 0)
goto give_up;
+ SET_SIZE(entry, canonical_size);
unuse_pack(&w_curs);
return;
}
@@ -1445,48 +1557,241 @@ static void check_object(struct object_entry *entry)
unuse_pack(&w_curs);
}
- entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
- /*
- * The error condition is checked in prepare_pack(). This is
- * to permit a missing preferred base object to be ignored
- * as a preferred base. Doing so can result in a larger
- * pack file, but the transfer will still take place.
- */
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
+ if (entry->type_valid) {
+ SET_SIZE(entry, canonical_size);
+ } else {
+ /*
+ * Bad object type is checked in prepare_pack(). This is
+ * to permit a missing preferred base object to be ignored
+ * as a preferred base. Doing so can result in a larger
+ * pack file, but the transfer will still take place.
+ */
+ }
}
static int pack_offset_sort(const void *_a, const void *_b)
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ const struct packed_git *a_in_pack = IN_PACK(a);
+ const struct packed_git *b_in_pack = IN_PACK(b);
	/* avoid filesystem thrashing with loose objects */
- if (!a->in_pack && !b->in_pack)
- return hashcmp(a->idx.sha1, b->idx.sha1);
+ if (!a_in_pack && !b_in_pack)
+ return oidcmp(&a->idx.oid, &b->idx.oid);
- if (a->in_pack < b->in_pack)
+ if (a_in_pack < b_in_pack)
return -1;
- if (a->in_pack > b->in_pack)
+ if (a_in_pack > b_in_pack)
return 1;
return a->in_pack_offset < b->in_pack_offset ? -1 :
(a->in_pack_offset > b->in_pack_offset);
}
+/*
+ * Drop an on-disk delta we were planning to reuse. Naively, this would
+ * just involve blanking out the "delta" field, but we have to deal
+ * with some extra book-keeping:
+ *
+ * 1. Removing ourselves from the delta_sibling linked list.
+ *
+ * 2. Updating our size/type to the non-delta representation. These were
+ * either not recorded initially (size) or overwritten with the delta type
+ * (type) when check_object() decided to reuse the delta.
+ *
+ * 3. Resetting our delta depth, as we are now a base object.
+ */
+static void drop_reused_delta(struct object_entry *entry)
+{
+ unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
+ struct object_info oi = OBJECT_INFO_INIT;
+ enum object_type type;
+ unsigned long size;
+
+ while (*idx) {
+ struct object_entry *oe = &to_pack.objects[*idx - 1];
+
+ if (oe == entry)
+ *idx = oe->delta_sibling_idx;
+ else
+ idx = &oe->delta_sibling_idx;
+ }
+ SET_DELTA(entry, NULL);
+ entry->depth = 0;
+
+ oi.sizep = &size;
+ oi.typep = &type;
+ if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
+ /*
+ * We failed to get the info from this pack for some reason;
+	 * fall back to oid_object_info(), which may find another copy.
+ * And if that fails, the error will be recorded in oe_type(entry)
+ * and dealt with in prepare_pack().
+ */
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &size));
+ } else {
+ oe_set_type(entry, type);
+ }
+ SET_SIZE(entry, size);
+}
+
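The while (*idx) loop at the top of drop_reused_delta() is the index-based form of the classic pointer-to-pointer unlink idiom, with delta_child_idx as the list head and delta_sibling_idx as the "next" links. For comparison, a minimal sketch of the same idiom with plain pointers (types and names here are illustrative, not from this file):

struct node {
	struct node *next;
};

/* Splice 'victim' out of the singly linked list headed by *headp. */
static void unlink_node(struct node **headp, struct node *victim)
{
	struct node **pp = headp;

	while (*pp) {
		if (*pp == victim)
			*pp = victim->next;	/* bypass it... */
		else
			pp = &(*pp)->next;	/* ...or follow the chain */
	}
}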
+/*
+ * Follow the chain of deltas from this entry onward, throwing away any links
+ * that cause us to hit a cycle (as determined by the DFS state flags in
+ * the entries).
+ *
+ * We also detect too-long reused chains that would violate our --depth
+ * limit.
+ */
+static void break_delta_chains(struct object_entry *entry)
+{
+ /*
+ * The actual depth of each object we will write is stored as an int,
+ * as it cannot exceed our int "depth" limit. But before we break
+ * changes based no that limit, we may potentially go as deep as the
+ * number of objects, which is elsewhere bounded to a uint32_t.
+ */
+ uint32_t total_depth;
+ struct object_entry *cur, *next;
+
+ for (cur = entry, total_depth = 0;
+ cur;
+ cur = DELTA(cur), total_depth++) {
+ if (cur->dfs_state == DFS_DONE) {
+ /*
+ * We've already seen this object and know it isn't
+ * part of a cycle. We do need to append its depth
+ * to our count.
+ */
+ total_depth += cur->depth;
+ break;
+ }
+
+ /*
+ * We break cycles before looping, so an ACTIVE state (or any
+ * other cruft which made its way into the state variable)
+ * is a bug.
+ */
+ if (cur->dfs_state != DFS_NONE)
+ BUG("confusing delta dfs state in first pass: %d",
+ cur->dfs_state);
+
+ /*
+ * Now we know this is the first time we've seen the object. If
+ * it's not a delta, we're done traversing, but we'll mark it
+ * done to save time on future traversals.
+ */
+ if (!DELTA(cur)) {
+ cur->dfs_state = DFS_DONE;
+ break;
+ }
+
+ /*
+ * Mark ourselves as active and see if the next step causes
+ * us to cycle to another active object. It's important to do
+ * this _before_ we loop, because it impacts where we make the
+ * cut, and thus how our total_depth counter works.
+ * E.g., We may see a partial loop like:
+ *
+ * A -> B -> C -> D -> B
+ *
+ * Cutting B->C breaks the cycle. But now the depth of A is
+ * only 1, and our total_depth counter is at 3. The size of the
+ * error is always one less than the size of the cycle we
+ * broke. Commits C and D were "lost" from A's chain.
+ *
+ * If we instead cut D->B, then the depth of A is correct at 3.
+ * We keep all commits in the chain that we examined.
+ */
+ cur->dfs_state = DFS_ACTIVE;
+ if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
+ drop_reused_delta(cur);
+ cur->dfs_state = DFS_DONE;
+ break;
+ }
+ }
+
+ /*
+ * And now that we've gone all the way to the bottom of the chain, we
+ * need to clear the active flags and set the depth fields as
+ * appropriate. Unlike the loop above, which can quit when it drops a
+ * delta, we need to keep going to look for more depth cuts. So we need
+ * an extra "next" pointer to keep going after we reset cur->delta.
+ */
+ for (cur = entry; cur; cur = next) {
+ next = DELTA(cur);
+
+ /*
+ * We should have a chain of zero or more ACTIVE states down to
+ * a final DONE. We can quit after the DONE, because either it
+ * has no bases, or we've already handled them in a previous
+ * call.
+ */
+ if (cur->dfs_state == DFS_DONE)
+ break;
+ else if (cur->dfs_state != DFS_ACTIVE)
+ BUG("confusing delta dfs state in second pass: %d",
+ cur->dfs_state);
+
+ /*
+ * If the total_depth is more than depth, then we need to snip
+ * the chain into two or more smaller chains that don't exceed
+ * the maximum depth. Most of the resulting chains will contain
+ * (depth + 1) entries (i.e., depth deltas plus one base), and
+ * the last chain (i.e., the one containing entry) will contain
+ * whatever entries are left over, namely
+ * (total_depth % (depth + 1)) of them.
+ *
+ * Since we are iterating towards decreasing depth, we need to
+ * decrement total_depth as we go, and we need to write to the
+ * entry what its final depth will be after all of the
+ * snipping. Since we're snipping into chains of length (depth
+ * + 1) entries, the final depth of an entry will be its
+ * original depth modulo (depth + 1). Any time we encounter an
+ * entry whose final depth is supposed to be zero, we snip it
+ * from its delta base, thereby making it so.
+ */
+ cur->depth = (total_depth--) % (depth + 1);
+ if (!cur->depth)
+ drop_reused_delta(cur);
+
+ cur->dfs_state = DFS_DONE;
+ }
+}
+
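As a worked example of the snipping arithmetic above: with --depth=2 (so depth + 1 = 3) and a reused chain E1 -> E2 -> ... -> E7 in which only E7 is a non-delta, the first pass counts total_depth = 6, and the second pass assigns each entry total_depth % 3 while counting down, dropping the delta wherever that lands on zero:

	E1: 6 % 3 = 0  -> drop delta; E1 becomes its own base
	E2: 5 % 3 = 2
	E3: 4 % 3 = 1
	E4: 3 % 3 = 0  -> drop delta; E4 becomes a base
	E5: 2 % 3 = 2
	E6: 1 % 3 = 1
	E7: already DFS_DONE (a plain base at depth 0)

The result is three chains, (E1), (E2 -> E3 -> E4) and (E5 -> E6 -> E7), none deeper than the --depth limit.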
static void get_object_details(void)
{
uint32_t i;
struct object_entry **sorted_by_offset;
+ if (progress)
+ progress_state = start_progress(_("Counting objects"),
+ to_pack.nr_objects);
+
sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
for (i = 0; i < to_pack.nr_objects; i++)
sorted_by_offset[i] = to_pack.objects + i;
- qsort(sorted_by_offset, to_pack.nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);
+ QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = sorted_by_offset[i];
check_object(entry);
- if (big_file_threshold < entry->size)
+ if (entry->type_valid &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold))
entry->no_try_delta = 1;
+ display_progress(progress_state, i + 1);
}
+ stop_progress(&progress_state);
+
+ /*
+ * This must happen in a second pass, since we rely on the delta
+ * information for the whole list being completed.
+ */
+ for (i = 0; i < to_pack.nr_objects; i++)
+ break_delta_chains(&to_pack.objects[i]);
free(sorted_by_offset);
}
@@ -1504,10 +1809,14 @@ static int type_size_sort(const void *_a, const void *_b)
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ enum object_type a_type = oe_type(a);
+ enum object_type b_type = oe_type(b);
+ unsigned long a_size = SIZE(a);
+ unsigned long b_size = SIZE(b);
- if (a->type > b->type)
+ if (a_type > b_type)
return -1;
- if (a->type < b->type)
+ if (a_type < b_type)
return 1;
if (a->hash > b->hash)
return -1;
@@ -1517,9 +1826,9 @@ static int type_size_sort(const void *_a, const void *_b)
return -1;
if (a->preferred_base < b->preferred_base)
return 1;
- if (a->size > b->size)
+ if (a_size > b_size)
return -1;
- if (a->size < b->size)
+ if (a_size < b_size)
return 1;
return a < b ? -1 : (a > b); /* newest first */
}
@@ -1549,18 +1858,30 @@ static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
#ifndef NO_PTHREADS
+/* Protect access to object database */
static pthread_mutex_t read_mutex;
#define read_lock() pthread_mutex_lock(&read_mutex)
#define read_unlock() pthread_mutex_unlock(&read_mutex)
+/* Protect delta_cache_size */
static pthread_mutex_t cache_mutex;
#define cache_lock() pthread_mutex_lock(&cache_mutex)
#define cache_unlock() pthread_mutex_unlock(&cache_mutex)
+/*
+ * Protect object list partitioning (e.g. struct thread_param) and
+ * progress_state
+ */
static pthread_mutex_t progress_mutex;
#define progress_lock() pthread_mutex_lock(&progress_mutex)
#define progress_unlock() pthread_mutex_unlock(&progress_mutex)
+/*
+ * Access to struct object_entry is unprotected since each thread owns
+ * a portion of the main object list. Just don't access object entries
+ * ahead in the list because they can be stolen and would need
+ * progress_mutex for protection.
+ */
#else
#define read_lock() (void)0
@@ -1572,6 +1893,46 @@ static pthread_mutex_t progress_mutex;
#endif
+/*
+ * Return the size of the object without doing any delta
+ * reconstruction (so non-deltas are true object sizes, but deltas
+ * return the size of the delta data).
+ */
+unsigned long oe_get_size_slow(struct packing_data *pack,
+ const struct object_entry *e)
+{
+ struct packed_git *p;
+ struct pack_window *w_curs;
+ unsigned char *buf;
+ enum object_type type;
+ unsigned long used, avail, size;
+
+ if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
+ read_lock();
+ if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
+ die(_("unable to get size of %s"),
+ oid_to_hex(&e->idx.oid));
+ read_unlock();
+ return size;
+ }
+
+ p = oe_in_pack(pack, e);
+ if (!p)
+ BUG("when e->type is a delta, it must belong to a pack");
+
+ read_lock();
+ w_curs = NULL;
+ buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
+ used = unpack_object_header_buffer(buf, avail, &type, &size);
+ if (used == 0)
+ die(_("unable to parse object header of %s"),
+ oid_to_hex(&e->idx.oid));
+
+ unuse_pack(&w_curs);
+ read_unlock();
+ return size;
+}
+
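oe_get_size_slow() exists because this series narrows the cached size field in struct object_entry; the SIZE() macro's oe_size() helper (defined in pack-objects.h, not shown in these hunks) is assumed to fall back to it when the cached value did not fit. A sketch of that dispatch, with the field names size_valid and size_ assumed from the e->type_ naming convention visible above:

static inline unsigned long oe_size(struct packing_data *pack,
				    const struct object_entry *e)
{
	if (e->size_valid)
		return e->size_;		/* value fit in the narrow cached field */
	return oe_get_size_slow(pack, e);	/* re-derive it from the object store */
}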
static int try_delta(struct unpacked *trg, struct unpacked *src,
unsigned max_depth, unsigned long *mem_usage)
{
@@ -1583,7 +1944,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
void *delta_buf;
/* Don't bother doing diffs between different types */
- if (trg_entry->type != src_entry->type)
+ if (oe_type(trg_entry) != oe_type(src_entry))
return -1;
/*
@@ -1594,8 +1955,8 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
* it, we will still save the transfer cost, as we already know
* the other side has it and we won't send src_entry at all.
*/
- if (reuse_delta && trg_entry->in_pack &&
- trg_entry->in_pack == src_entry->in_pack &&
+ if (reuse_delta && IN_PACK(trg_entry) &&
+ IN_PACK(trg_entry) == IN_PACK(src_entry) &&
!src_entry->preferred_base &&
trg_entry->in_pack_type != OBJ_REF_DELTA &&
trg_entry->in_pack_type != OBJ_OFS_DELTA)
@@ -1606,19 +1967,19 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
return 0;
/* Now some size filtering heuristics. */
- trg_size = trg_entry->size;
- if (!trg_entry->delta) {
- max_size = trg_size/2 - 20;
+ trg_size = SIZE(trg_entry);
+ if (!DELTA(trg_entry)) {
+ max_size = trg_size/2 - the_hash_algo->rawsz;
ref_depth = 1;
} else {
- max_size = trg_entry->delta_size;
+ max_size = DELTA_SIZE(trg_entry);
ref_depth = trg->depth;
}
max_size = (uint64_t)max_size * (max_depth - src->depth) /
(max_depth - ref_depth + 1);
if (max_size == 0)
return 0;
- src_size = src_entry->size;
+ src_size = SIZE(src_entry);
sizediff = src_size < trg_size ? trg_size - src_size : 0;
if (sizediff >= max_size)
return 0;
@@ -1628,26 +1989,27 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
/* Load data if not already done */
if (!trg->data) {
read_lock();
- trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz);
+ trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
read_unlock();
if (!trg->data)
- die("object %s cannot be read",
- sha1_to_hex(trg_entry->idx.sha1));
+ die(_("object %s cannot be read"),
+ oid_to_hex(&trg_entry->idx.oid));
if (sz != trg_size)
- die("object %s inconsistent object length (%lu vs %lu)",
- sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);
+ die(_("object %s inconsistent object length (%lu vs %lu)"),
+ oid_to_hex(&trg_entry->idx.oid), sz,
+ trg_size);
*mem_usage += sz;
}
if (!src->data) {
read_lock();
- src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz);
+ src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
read_unlock();
if (!src->data) {
if (src_entry->preferred_base) {
static int warned = 0;
if (!warned++)
- warning("object %s cannot be read",
- sha1_to_hex(src_entry->idx.sha1));
+ warning(_("object %s cannot be read"),
+ oid_to_hex(&src_entry->idx.oid));
/*
* Those objects are not included in the
* resulting pack. Be resilient and ignore
@@ -1656,12 +2018,13 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
*/
return 0;
}
- die("object %s cannot be read",
- sha1_to_hex(src_entry->idx.sha1));
+ die(_("object %s cannot be read"),
+ oid_to_hex(&src_entry->idx.oid));
}
if (sz != src_size)
- die("object %s inconsistent object length (%lu vs %lu)",
- sha1_to_hex(src_entry->idx.sha1), sz, src_size);
+ die(_("object %s inconsistent object length (%lu vs %lu)"),
+ oid_to_hex(&src_entry->idx.oid), sz,
+ src_size);
*mem_usage += sz;
}
if (!src->index) {
@@ -1669,7 +2032,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
if (!src->index) {
static int warned = 0;
if (!warned++)
- warning("suboptimal pack - out of memory");
+ warning(_("suboptimal pack - out of memory"));
return 0;
}
*mem_usage += sizeof_delta_index(src->index);
@@ -1679,9 +2042,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
if (!delta_buf)
return 0;
- if (trg_entry->delta) {
+ if (DELTA(trg_entry)) {
/* Prefer only shallower same-sized deltas. */
- if (delta_size == trg_entry->delta_size &&
+ if (delta_size == DELTA_SIZE(trg_entry) &&
src->depth + 1 >= trg->depth) {
free(delta_buf);
return 0;
@@ -1696,7 +2059,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
free(trg_entry->delta_data);
cache_lock();
if (trg_entry->delta_data) {
- delta_cache_size -= trg_entry->delta_size;
+ delta_cache_size -= DELTA_SIZE(trg_entry);
trg_entry->delta_data = NULL;
}
if (delta_cacheable(src_size, trg_size, delta_size)) {
@@ -1708,8 +2071,8 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
free(delta_buf);
}
- trg_entry->delta = src_entry;
- trg_entry->delta_size = delta_size;
+ SET_DELTA(trg_entry, src_entry);
+ SET_DELTA_SIZE(trg_entry, delta_size);
trg->depth = src->depth + 1;
return 1;
@@ -1717,13 +2080,13 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
- struct object_entry *child = me->delta_child;
+ struct object_entry *child = DELTA_CHILD(me);
unsigned int m = n;
while (child) {
unsigned int c = check_delta_limit(child, n + 1);
if (m < c)
m = c;
- child = child->delta_sibling;
+ child = DELTA_SIBLING(child);
}
return m;
}
@@ -1734,9 +2097,8 @@ static unsigned long free_unpacked(struct unpacked *n)
free_delta_index(n->index);
n->index = NULL;
if (n->data) {
- freed_mem += n->entry->size;
- free(n->data);
- n->data = NULL;
+ freed_mem += SIZE(n->entry);
+ FREE_AND_NULL(n->data);
}
n->entry = NULL;
n->depth = 0;
@@ -1793,7 +2155,7 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
* otherwise they would become too deep.
*/
max_depth = depth;
- if (entry->delta_child) {
+ if (DELTA_CHILD(entry)) {
max_depth -= check_delta_limit(entry, 0);
if (max_depth <= 0)
goto next;
@@ -1831,19 +2193,26 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
* between writes at that moment.
*/
if (entry->delta_data && !pack_to_stdout) {
- entry->z_delta_size = do_compress(&entry->delta_data,
- entry->delta_size);
- cache_lock();
- delta_cache_size -= entry->delta_size;
- delta_cache_size += entry->z_delta_size;
- cache_unlock();
+ unsigned long size;
+
+ size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
+ if (size < (1U << OE_Z_DELTA_BITS)) {
+ entry->z_delta_size = size;
+ cache_lock();
+ delta_cache_size -= DELTA_SIZE(entry);
+ delta_cache_size += entry->z_delta_size;
+ cache_unlock();
+ } else {
+ FREE_AND_NULL(entry->delta_data);
+ entry->z_delta_size = 0;
+ }
}
/* if we made n a delta, and if n is already at max
* depth, leaving it in the window is pointless. we
* should evict it first.
*/
- if (entry->delta && max_depth <= n->depth)
+ if (DELTA(entry) && max_depth <= n->depth)
continue;
/*
@@ -1851,7 +2220,7 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
* currently deltified object, to keep it longer. It will
* be the first base object to be attempted next.
*/
- if (entry->delta) {
+ if (DELTA(entry)) {
struct unpacked swap = array[best_base];
int dist = (window + idx - best_base) % window;
int dst = best_base;
@@ -1890,12 +2259,19 @@ static void try_to_free_from_threads(size_t size)
static try_to_free_t old_try_to_free_routine;
/*
+ * The main object list is split into smaller lists, each of which is
+ * handed to one worker.
+ *
* The main thread waits on the condition that (at least) one of the workers
* has stopped working (which is indicated in the .working member of
* struct thread_params).
+ *
* When a work thread has completed its work, it sets .working to 0 and
* signals the main thread and waits on the condition that .data_ready
* becomes 1.
+ *
+ * The main thread steals half of the work from the worker that has the
+ * most work left and hands it to the idle worker.
*/
struct thread_params {
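
A minimal sketch of the stealing step described in the comment above, using only the ->list and ->remaining members that this patch shows in struct thread_params (the helper name is hypothetical, and the locking that the real ll_find_deltas() loop performs is omitted):

/* Illustrative only: hand half of the busiest worker's remaining objects to an idle one. */
static void steal_half(struct thread_params *workers, int nr_workers,
		       struct thread_params *idle)
{
	struct thread_params *victim = NULL;
	unsigned sub_size;
	int i;

	for (i = 0; i < nr_workers; i++)
		if (!victim || workers[i].remaining > victim->remaining)
			victim = &workers[i];

	if (!victim || victim->remaining < 2)
		return; /* nothing worth splitting */

	sub_size = victim->remaining / 2;
	victim->remaining -= sub_size;
	idle->list = victim->list + victim->remaining; /* take the tail half */
	idle->remaining = sub_size;
}

The idle worker is then woken through the data_ready/cond handshake visible in threaded_find_deltas() below so it starts processing its new sub-list.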
@@ -1923,6 +2299,7 @@ static void init_threaded_search(void)
pthread_mutex_init(&cache_mutex, NULL);
pthread_mutex_init(&progress_mutex, NULL);
pthread_cond_init(&progress_cond, NULL);
+ pthread_mutex_init(&to_pack.lock, NULL);
old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}
@@ -1939,7 +2316,10 @@ static void *threaded_find_deltas(void *arg)
{
struct thread_params *me = arg;
+ progress_lock();
while (me->remaining) {
+ progress_unlock();
+
find_deltas(me->list, &me->remaining,
me->window, me->depth, me->processed);
@@ -1961,7 +2341,10 @@ static void *threaded_find_deltas(void *arg)
pthread_cond_wait(&me->cond, &me->mutex);
me->data_ready = 0;
pthread_mutex_unlock(&me->mutex);
+
+ progress_lock();
}
+ progress_unlock();
/* leave ->working 1 so that this doesn't get more work assigned */
return NULL;
}
@@ -1980,8 +2363,8 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size,
return;
}
if (progress > pack_to_stdout)
- fprintf(stderr, "Delta compression using up to %d threads.\n",
- delta_search_threads);
+ fprintf_ln(stderr, _("Delta compression using up to %d threads"),
+ delta_search_threads);
p = xcalloc(delta_search_threads, sizeof(*p));
/* Partition the work amongst work threads. */
@@ -2021,7 +2404,7 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size,
ret = pthread_create(&p[i].thread, NULL,
threaded_find_deltas, &p[i]);
if (ret)
- die("unable to create thread: %s", strerror(ret));
+ die(_("unable to create thread: %s"), strerror(ret));
active_threads++;
}
@@ -2099,14 +2482,43 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size,
#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)
#endif
-static int add_ref_tag(const char *path, const unsigned char *sha1, int flag, void *cb_data)
+static void add_tag_chain(const struct object_id *oid)
+{
+ struct tag *tag;
+
+ /*
+ * We catch duplicates already in add_object_entry(), but we'd
+ * prefer to do this extra check to avoid having to parse the
+ * tag at all if we already know that it's being packed (e.g., if
+ * it was included via bitmaps, we would not have parsed it
+ * previously).
+ */
+ if (packlist_find(&to_pack, oid->hash, NULL))
+ return;
+
+ tag = lookup_tag(the_repository, oid);
+ while (1) {
+ if (!tag || parse_tag(tag) || !tag->tagged)
+ die(_("unable to pack objects reachable from tag %s"),
+ oid_to_hex(oid));
+
+ add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
+
+ if (tag->tagged->type != OBJ_TAG)
+ return;
+
+ tag = (struct tag *)tag->tagged;
+ }
+}
+
+static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
- unsigned char peeled[20];
+ struct object_id peeled;
if (starts_with(path, "refs/tags/") && /* is a tag? */
- !peel_ref(path, peeled) && /* peelable? */
- packlist_find(&to_pack, peeled, NULL)) /* object packed? */
- add_object_entry(sha1, OBJ_TAG, NULL, 0);
+ !peel_ref(path, &peeled) && /* peelable? */
+ packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */
+ add_tag_chain(oid);
return 0;
}
@@ -2131,19 +2543,20 @@ static void prepare_pack(int window, int depth)
if (!to_pack.nr_objects || !window || !depth)
return;
- delta_list = xmalloc(to_pack.nr_objects * sizeof(*delta_list));
+ ALLOC_ARRAY(delta_list, to_pack.nr_objects);
nr_deltas = n = 0;
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = to_pack.objects + i;
- if (entry->delta)
+ if (DELTA(entry))
/* This happens if we decided to reuse existing
* delta from a pack. "reuse_delta &&" is implied.
*/
continue;
- if (entry->size < 50)
+ if (!entry->type_valid ||
+ oe_size_less_than(&to_pack, entry, 50))
continue;
if (entry->no_try_delta)
@@ -2151,11 +2564,11 @@ static void prepare_pack(int window, int depth)
if (!entry->preferred_base) {
nr_deltas++;
- if (entry->type < 0)
- die("unable to get type of object %s",
- sha1_to_hex(entry->idx.sha1));
+ if (oe_type(entry) < 0)
+ die(_("unable to get type of object %s"),
+ oid_to_hex(&entry->idx.oid));
} else {
- if (entry->type < 0) {
+ if (oe_type(entry) < 0) {
/*
* This object is not found, but we
* don't have to include it anyway.
@@ -2172,11 +2585,11 @@ static void prepare_pack(int window, int depth)
if (progress)
progress_state = start_progress(_("Compressing objects"),
nr_deltas);
- qsort(delta_list, n, sizeof(*delta_list), type_size_sort);
+ QSORT(delta_list, n, type_size_sort);
ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
stop_progress(&progress_state);
if (nr_done != nr_deltas)
- die("inconsistency with delta count");
+ die(_("inconsistency with delta count"));
}
free(delta_list);
}
@@ -2195,16 +2608,6 @@ static int git_pack_config(const char *k, const char *v, void *cb)
depth = git_config_int(k, v);
return 0;
}
- if (!strcmp(k, "pack.compression")) {
- int level = git_config_int(k, v);
- if (level == -1)
- level = Z_DEFAULT_COMPRESSION;
- else if (level < 0 || level > Z_BEST_COMPRESSION)
- die("bad pack compression level %d", level);
- pack_compression_level = level;
- pack_compression_seen = 1;
- return 0;
- }
if (!strcmp(k, "pack.deltacachesize")) {
max_delta_cache_size = git_config_int(k, v);
return 0;
@@ -2220,24 +2623,26 @@ static int git_pack_config(const char *k, const char *v, void *cb)
write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
}
if (!strcmp(k, "pack.usebitmaps")) {
- use_bitmap_index = git_config_bool(k, v);
+ use_bitmap_index_default = git_config_bool(k, v);
return 0;
}
if (!strcmp(k, "pack.threads")) {
delta_search_threads = git_config_int(k, v);
if (delta_search_threads < 0)
- die("invalid number of threads specified (%d)",
+ die(_("invalid number of threads specified (%d)"),
delta_search_threads);
#ifdef NO_PTHREADS
- if (delta_search_threads != 1)
- warning("no threads support, ignoring %s", k);
+ if (delta_search_threads != 1) {
+ warning(_("no threads support, ignoring %s"), k);
+ delta_search_threads = 0;
+ }
#endif
return 0;
}
if (!strcmp(k, "pack.indexversion")) {
pack_idx_opts.version = git_config_int(k, v);
if (pack_idx_opts.version > 2)
- die("bad pack.indexversion=%"PRIu32,
+ die(_("bad pack.indexversion=%"PRIu32),
pack_idx_opts.version);
return 0;
}
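
For reference, the keys handled in git_pack_config() above correspond to ordinary configuration entries; a hedged example with illustrative values (not recommendations) might be:

[pack]
	threads = 4
	indexVersion = 2
	deltaCacheSize = 256m
	useBitmaps = true

Setting pack.threads to 0 falls through to the CPU autodetection in cmd_pack_objects(), and after this change pack.usebitmaps only seeds use_bitmap_index_default, which the command line and the stdout/bitmap checks later in cmd_pack_objects() can still override.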
@@ -2246,66 +2651,116 @@ static int git_pack_config(const char *k, const char *v, void *cb)
static void read_object_list_from_stdin(void)
{
- char line[40 + 1 + PATH_MAX + 2];
- unsigned char sha1[20];
+ char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
+ struct object_id oid;
+ const char *p;
for (;;) {
if (!fgets(line, sizeof(line), stdin)) {
if (feof(stdin))
break;
if (!ferror(stdin))
- die("fgets returned NULL, not EOF, not error!");
+ die("BUG: fgets returned NULL, not EOF, not error!");
if (errno != EINTR)
die_errno("fgets");
clearerr(stdin);
continue;
}
if (line[0] == '-') {
- if (get_sha1_hex(line+1, sha1))
- die("expected edge sha1, got garbage:\n %s",
+ if (get_oid_hex(line+1, &oid))
+ die(_("expected edge object ID, got garbage:\n %s"),
line);
- add_preferred_base(sha1);
+ add_preferred_base(&oid);
continue;
}
- if (get_sha1_hex(line, sha1))
- die("expected sha1, got garbage:\n %s", line);
+ if (parse_oid_hex(line, &oid, &p))
+ die(_("expected object ID, got garbage:\n %s"), line);
- add_preferred_base_object(line+41);
- add_object_entry(sha1, 0, line+41, 0);
+ add_preferred_base_object(p + 1);
+ add_object_entry(&oid, OBJ_NONE, p + 1, 0);
}
}
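
Spelled out, the loop above accepts one object per input line: an object ID optionally followed by a space and a name hint, with a leading '-' marking an edge object that is only registered as a preferred delta base. A hypothetical input (the IDs and path are placeholders) would look like:

1234567890abcdef1234567890abcdef12345678 path/to/a/file.c
-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa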
+/* Remember to update object flag allocation in object.h */
#define OBJECT_ADDED (1u<<20)
static void show_commit(struct commit *commit, void *data)
{
- add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
+ add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);
commit->object.flags |= OBJECT_ADDED;
if (write_bitmap_index)
index_commit_for_bitmap(commit);
}
-static void show_object(struct object *obj,
- const struct name_path *path, const char *last,
- void *data)
+static void show_object(struct object *obj, const char *name, void *data)
{
- char *name = path_name(path, last);
-
add_preferred_base_object(name);
- add_object_entry(obj->sha1, obj->type, name, 0);
+ add_object_entry(&obj->oid, obj->type, name, 0);
obj->flags |= OBJECT_ADDED;
+}
+
+static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
+{
+ assert(arg_missing_action == MA_ALLOW_ANY);
+
+ /*
+ * Quietly ignore ALL missing objects. This avoids problems with
+ * staging them now and getting an odd error later.
+ */
+ if (!has_object_file(&obj->oid))
+ return;
+
+ show_object(obj, name, data);
+}
+
+static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
+{
+ assert(arg_missing_action == MA_ALLOW_PROMISOR);
/*
- * We will have generated the hash from the name,
- * but not saved a pointer to it - we can free it
+ * Quietly ignore EXPECTED missing objects. This avoids problems with
+ * staging them now and getting an odd error later.
*/
- free((char *)name);
+ if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
+ return;
+
+ show_object(obj, name, data);
+}
+
+static int option_parse_missing_action(const struct option *opt,
+ const char *arg, int unset)
+{
+ assert(arg);
+ assert(!unset);
+
+ if (!strcmp(arg, "error")) {
+ arg_missing_action = MA_ERROR;
+ fn_show_object = show_object;
+ return 0;
+ }
+
+ if (!strcmp(arg, "allow-any")) {
+ arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
+ fn_show_object = show_object__ma_allow_any;
+ return 0;
+ }
+
+ if (!strcmp(arg, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
+ fn_show_object = show_object__ma_allow_promisor;
+ return 0;
+ }
+
+ die(_("invalid value for --missing"));
+ return 0;
}
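
As a hedged usage illustration (the filter spec and output path are examples, and this assumes a repository with promisor packs), these handlers would typically be reached through an invocation like:

echo HEAD | git pack-objects --stdout --revs \
	--filter=blob:none --missing=allow-promisor >partial.pack

Per the checks added later in cmd_pack_objects(), --filter is only accepted together with --stdout, and allow-promisor quietly drops objects that are missing but promised instead of aborting the pack.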
static void show_edge(struct commit *commit)
{
- add_preferred_base(commit->object.sha1);
+ add_preferred_base(&commit->object.oid);
}
struct in_pack_object {
@@ -2314,14 +2769,14 @@ struct in_pack_object {
};
struct in_pack {
- int alloc;
- int nr;
+ unsigned int alloc;
+ unsigned int nr;
struct in_pack_object *array;
};
static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
- in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->sha1, p);
+ in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
in_pack->array[in_pack->nr].object = object;
in_pack->nr++;
}
@@ -2340,7 +2795,7 @@ static int ofscmp(const void *a_, const void *b_)
else if (a->offset > b->offset)
return 1;
else
- return hashcmp(a->object->sha1, b->object->sha1);
+ return oidcmp(&a->object->oid, &b->object->oid);
}
static void add_objects_in_unpacked_packs(struct rev_info *revs)
@@ -2351,22 +2806,22 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
memset(&in_pack, 0, sizeof(in_pack));
- for (p = packed_git; p; p = p->next) {
- const unsigned char *sha1;
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ struct object_id oid;
struct object *o;
- if (!p->pack_local || p->pack_keep)
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
- die("cannot open pack index");
+ die(_("cannot open pack index"));
ALLOC_GROW(in_pack.array,
in_pack.nr + p->num_objects,
in_pack.alloc);
for (i = 0; i < p->num_objects; i++) {
- sha1 = nth_packed_object_sha1(p, i);
- o = lookup_unknown_object(sha1);
+ nth_packed_object_oid(&oid, p, i);
+ o = lookup_unknown_object(oid.hash);
if (!(o->flags & OBJECT_ADDED))
mark_in_pack_object(o, p, &in_pack);
o->flags |= OBJECT_ADDED;
@@ -2374,31 +2829,58 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
}
if (in_pack.nr) {
- qsort(in_pack.array, in_pack.nr, sizeof(in_pack.array[0]),
- ofscmp);
+ QSORT(in_pack.array, in_pack.nr, ofscmp);
for (i = 0; i < in_pack.nr; i++) {
struct object *o = in_pack.array[i].object;
- add_object_entry(o->sha1, o->type, "", 0);
+ add_object_entry(&o->oid, o->type, "", 0);
}
}
free(in_pack.array);
}
-static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
+static int add_loose_object(const struct object_id *oid, const char *path,
+ void *data)
+{
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
+
+ if (type < 0) {
+ warning(_("loose object at %s could not be examined"), path);
+ return 0;
+ }
+
+ add_object_entry(oid, type, "", 0);
+ return 0;
+}
+
+/*
+ * We actually don't even have to worry about reachability here.
+ * add_object_entry will weed out duplicates, so we just add every
+ * loose object we find.
+ */
+static void add_unreachable_loose_objects(void)
+{
+ for_each_loose_file_in_objdir(get_object_directory(),
+ add_loose_object,
+ NULL, NULL, NULL);
+}
+
+static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
{
static struct packed_git *last_found = (void *)1;
struct packed_git *p;
- p = (last_found != (void *)1) ? last_found : packed_git;
+ p = (last_found != (void *)1) ? last_found :
+ get_packed_git(the_repository);
while (p) {
- if ((!p->pack_local || p->pack_keep) &&
- find_pack_entry_one(sha1, p)) {
+ if ((!p->pack_local || p->pack_keep ||
+ p->pack_keep_in_core) &&
+ find_pack_entry_one(oid->hash, p)) {
last_found = p;
return 1;
}
if (p == last_found)
- p = packed_git;
+ p = get_packed_git(the_repository);
else
p = p->next;
if (p == last_found)
@@ -2414,16 +2896,16 @@ static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
*
* This is filled by get_object_list.
*/
-static struct sha1_array recent_objects;
+static struct oid_array recent_objects;
-static int loosened_object_can_be_discarded(const unsigned char *sha1,
- unsigned long mtime)
+static int loosened_object_can_be_discarded(const struct object_id *oid,
+ timestamp_t mtime)
{
if (!unpack_unreachable_expiration)
return 0;
if (mtime > unpack_unreachable_expiration)
return 0;
- if (sha1_array_lookup(&recent_objects, sha1) >= 0)
+ if (oid_array_lookup(&recent_objects, oid) >= 0)
return 0;
return 1;
}
@@ -2432,43 +2914,50 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
{
struct packed_git *p;
uint32_t i;
- const unsigned char *sha1;
+ struct object_id oid;
- for (p = packed_git; p; p = p->next) {
- if (!p->pack_local || p->pack_keep)
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
- die("cannot open pack index");
+ die(_("cannot open pack index"));
for (i = 0; i < p->num_objects; i++) {
- sha1 = nth_packed_object_sha1(p, i);
- if (!packlist_find(&to_pack, sha1, NULL) &&
- !has_sha1_pack_kept_or_nonlocal(sha1) &&
- !loosened_object_can_be_discarded(sha1, p->mtime))
- if (force_object_loose(sha1, p->mtime))
- die("unable to force loose object");
+ nth_packed_object_oid(&oid, p, i);
+ if (!packlist_find(&to_pack, oid.hash, NULL) &&
+ !has_sha1_pack_kept_or_nonlocal(&oid) &&
+ !loosened_object_can_be_discarded(&oid, p->mtime))
+ if (force_object_loose(&oid, p->mtime))
+ die(_("unable to force loose object"));
}
}
}
/*
- * This tracks any options which a reader of the pack might
- * not understand, and which would therefore prevent blind reuse
- * of what we have on disk.
+ * This tracks any options which pack-reuse code expects to be on, or which a
+ * reader of the pack might not understand, and which would therefore prevent
+ * blind reuse of what we have on disk.
*/
static int pack_options_allow_reuse(void)
{
- return allow_ofs_delta;
+ return pack_to_stdout &&
+ allow_ofs_delta &&
+ !ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
+ (!local || !have_non_local_packs) &&
+ !incremental;
}
static int get_object_list_from_bitmap(struct rev_info *revs)
{
- if (prepare_bitmap_walk(revs) < 0)
+ struct bitmap_index *bitmap_git;
+ if (!(bitmap_git = prepare_bitmap_walk(revs)))
return -1;
if (pack_options_allow_reuse() &&
!reuse_partial_packfile_from_bitmap(
+ bitmap_git,
&reuse_packfile,
&reuse_packfile_objects,
&reuse_packfile_offset)) {
@@ -2477,21 +2966,21 @@ static int get_object_list_from_bitmap(struct rev_info *revs)
display_progress(progress_state, nr_result);
}
- traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
+ traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);
+ free_bitmap_index(bitmap_git);
return 0;
}
static void record_recent_object(struct object *obj,
- const struct name_path *path,
- const char *last,
+ const char *name,
void *data)
{
- sha1_array_append(&recent_objects, obj->sha1);
+ oid_array_append(&recent_objects, &obj->oid);
}
static void record_recent_commit(struct commit *commit, void *data)
{
- sha1_array_append(&recent_objects, commit->object.sha1);
+ oid_array_append(&recent_objects, &commit->object.oid);
}
static void get_object_list(int ac, const char **av)
@@ -2505,7 +2994,7 @@ static void get_object_list(int ac, const char **av)
setup_revisions(ac, av, &revs, NULL);
/* make sure shallows are read */
- is_repository_shallow();
+ is_repository_shallow(the_repository);
while (fgets(line, sizeof(line), stdin) != NULL) {
int len = strlen(line);
@@ -2520,44 +3009,77 @@ static void get_object_list(int ac, const char **av)
continue;
}
if (starts_with(line, "--shallow ")) {
- unsigned char sha1[20];
- if (get_sha1_hex(line + 10, sha1))
+ struct object_id oid;
+ if (get_oid_hex(line + 10, &oid))
die("not an SHA-1 '%s'", line + 10);
- register_shallow(sha1);
+ register_shallow(the_repository, &oid);
use_bitmap_index = 0;
continue;
}
- die("not a rev '%s'", line);
+ die(_("not a rev '%s'"), line);
}
if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
- die("bad revision '%s'", line);
+ die(_("bad revision '%s'"), line);
}
if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
return;
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
mark_edges_uninteresting(&revs, show_edge);
- traverse_commit_list(&revs, show_commit, show_object, NULL);
+
+ if (!fn_show_object)
+ fn_show_object = show_object;
+ traverse_commit_list_filtered(&filter_options, &revs,
+ show_commit, fn_show_object, NULL,
+ NULL);
if (unpack_unreachable_expiration) {
revs.ignore_missing_links = 1;
if (add_unseen_recent_objects_to_traversal(&revs,
unpack_unreachable_expiration))
- die("unable to add recent objects");
+ die(_("unable to add recent objects"));
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
traverse_commit_list(&revs, record_recent_commit,
record_recent_object, NULL);
}
if (keep_unreachable)
add_objects_in_unpacked_packs(&revs);
+ if (pack_loose_unreachable)
+ add_unreachable_loose_objects();
if (unpack_unreachable)
loosen_unused_packed_objects(&revs);
- sha1_array_clear(&recent_objects);
+ oid_array_clear(&recent_objects);
+}
+
+static void add_extra_kept_packs(const struct string_list *names)
+{
+ struct packed_git *p;
+
+ if (!names->nr)
+ return;
+
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ const char *name = basename(p->pack_name);
+ int i;
+
+ if (!p->pack_local)
+ continue;
+
+ for (i = 0; i < names->nr; i++)
+ if (!fspathcmp(name, names->items[i].string))
+ break;
+
+ if (i < names->nr) {
+ p->pack_keep_in_core = 1;
+ ignore_packed_keep_in_core = 1;
+ continue;
+ }
+ }
}
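
As an illustrative invocation (the pack basename is a placeholder), a caller that wants a particular local pack left alone for this run can pass its basename; only the basename is compared, via fspathcmp(), so no path is needed:

echo HEAD | git pack-objects --stdout --revs \
	--keep-pack=pack-1234abcd.pack >slim.pack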
static int option_parse_index_version(const struct option *opt,
@@ -2590,23 +3112,6 @@ static int option_parse_unpack_unreachable(const struct option *opt,
return 0;
}
-static int option_parse_ulong(const struct option *opt,
- const char *arg, int unset)
-{
- if (unset)
- die(_("option %s does not accept negative form"),
- opt->long_name);
-
- if (!git_parse_ulong(arg, opt->value))
- die(_("unable to parse value '%s' for option %s"),
- arg, opt->long_name);
- return 0;
-}
-
-#define OPT_ULONG(s, l, v, h) \
- { OPTION_CALLBACK, (s), (l), (v), "n", (h), \
- PARSE_OPT_NONEG, option_parse_ulong }
-
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
@@ -2616,6 +3121,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
struct argv_array rp = ARGV_ARRAY_INIT;
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
int rev_list_index = 0;
+ struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
@@ -2626,19 +3132,19 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
OPT_BOOL(0, "all-progress-implied",
&all_progress_implied,
N_("similar to --all-progress when progress meter is shown")),
- { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
+ { OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"),
N_("write the pack index file in the specified idx format version"),
0, option_parse_index_version },
- OPT_ULONG(0, "max-pack-size", &pack_size_limit,
- N_("maximum size of each output pack file")),
+ OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
+ N_("maximum size of each output pack file")),
OPT_BOOL(0, "local", &local,
N_("ignore borrowed objects from alternate object store")),
OPT_BOOL(0, "incremental", &incremental,
N_("ignore packed objects")),
OPT_INTEGER(0, "window", &window,
N_("limit pack window by objects")),
- OPT_ULONG(0, "window-memory", &window_memory_limit,
- N_("limit pack window by memory in addition to object limit")),
+ OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
+ N_("limit pack window by memory in addition to object limit")),
OPT_INTEGER(0, "depth", &depth,
N_("maximum length of delta chain allowed in the resulting pack")),
OPT_BOOL(0, "reuse-delta", &reuse_delta,
@@ -2653,24 +3159,26 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
N_("do not create an empty pack output")),
OPT_BOOL(0, "revs", &use_internal_rev_list,
N_("read revision arguments from standard input")),
- { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
- N_("limit the objects to those that are not yet packed"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
- N_("include objects reachable from any reference"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
- N_("include objects referred by reflog entries"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
- N_("include objects referred to by the index"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
+ OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
+ N_("limit the objects to those that are not yet packed"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "all", &rev_list_all,
+ N_("include objects reachable from any reference"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
+ N_("include objects referred by reflog entries"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
+ N_("include objects referred to by the index"),
+ 1, PARSE_OPT_NONEG),
OPT_BOOL(0, "stdout", &pack_to_stdout,
N_("output pack to stdout")),
OPT_BOOL(0, "include-tag", &include_tag,
N_("include tag objects that refer to objects to be packed")),
OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
N_("keep unreachable objects")),
+ OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
+ N_("pack loose unreachable objects")),
{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
N_("unpack unreachable objects newer than <time>"),
PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
@@ -2678,8 +3186,10 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
N_("create thin packs")),
OPT_BOOL(0, "shallow", &shallow,
N_("create packs suitable for shallow fetches")),
- OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
+ OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
N_("ignore packs that have companion .keep file")),
+ OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+ N_("ignore this pack")),
OPT_INTEGER(0, "compression", &pack_compression_level,
N_("pack compression level")),
OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
@@ -2688,15 +3198,22 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
N_("use a bitmap index if available to speed up counting objects")),
OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
N_("write a bitmap index together with the pack index")),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ { OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
+ N_("handling for missing objects"), PARSE_OPT_NONEG,
+ option_parse_missing_action },
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("do not pack objects in promisor packfiles")),
OPT_END(),
};
- check_replace_refs = 0;
+ if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
+ BUG("too many dfs states, increase OE_DFS_STATE_BITS");
+
+ read_replace_refs = 0;
reset_pack_idx_option(&pack_idx_opts);
git_config(git_pack_config, NULL);
- if (!pack_compression_seen && core_compression_seen)
- pack_compression_level = core_compression_level;
progress = isatty(2);
argc = parse_options(argc, argv, prefix, pack_objects_options,
@@ -2709,6 +3226,17 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (pack_to_stdout != !base_name || argc)
usage_with_options(pack_usage, pack_objects_options);
+ if (depth >= (1 << OE_DEPTH_BITS)) {
+ warning(_("delta chain depth %d is too deep, forcing %d"),
+ depth, (1 << OE_DEPTH_BITS) - 1);
+ depth = (1 << OE_DEPTH_BITS) - 1;
+ }
+ if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
+ warning(_("pack.deltaCacheLimit is too high, forcing %d"),
+ (1U << OE_Z_DELTA_BITS) - 1);
+ cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
+ }
+
argv_array_push(&rp, "pack-objects");
if (thin) {
use_internal_rev_list = 1;
@@ -2735,38 +3263,68 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
argv_array_push(&rp, "--unpacked");
}
+ if (exclude_promisor_objects) {
+ use_internal_rev_list = 1;
+ fetch_if_missing = 0;
+ argv_array_push(&rp, "--exclude-promisor-objects");
+ }
+ if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
+ use_internal_rev_list = 1;
+
if (!reuse_object)
reuse_delta = 0;
if (pack_compression_level == -1)
pack_compression_level = Z_DEFAULT_COMPRESSION;
else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
- die("bad pack compression level %d", pack_compression_level);
+ die(_("bad pack compression level %d"), pack_compression_level);
if (!delta_search_threads) /* --threads=0 means autodetect */
delta_search_threads = online_cpus();
#ifdef NO_PTHREADS
if (delta_search_threads != 1)
- warning("no threads support, ignoring --threads");
+ warning(_("no threads support, ignoring --threads"));
#endif
if (!pack_to_stdout && !pack_size_limit)
pack_size_limit = pack_size_limit_cfg;
if (pack_to_stdout && pack_size_limit)
- die("--max-pack-size cannot be used to build a pack for transfer.");
+ die(_("--max-pack-size cannot be used to build a pack for transfer"));
if (pack_size_limit && pack_size_limit < 1024*1024) {
- warning("minimum pack size limit is 1 MiB");
+ warning(_("minimum pack size limit is 1 MiB"));
pack_size_limit = 1024*1024;
}
if (!pack_to_stdout && thin)
- die("--thin cannot be used to build an indexable pack.");
+ die(_("--thin cannot be used to build an indexable pack"));
if (keep_unreachable && unpack_unreachable)
- die("--keep-unreachable and --unpack-unreachable are incompatible.");
+ die(_("--keep-unreachable and --unpack-unreachable are incompatible"));
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
- if (!use_internal_rev_list || !pack_to_stdout || is_repository_shallow())
+ if (filter_options.choice) {
+ if (!pack_to_stdout)
+ die(_("cannot use --filter without --stdout"));
+ use_bitmap_index = 0;
+ }
+
+ /*
+ * "soft" reasons not to use bitmaps - for on-disk repack by default we want
+ *
+ * - to produce a good pack (with a bitmap index, not-yet-packed objects
+ * are packed in suboptimal order).
+ *
+ * - to use more robust pack-generation codepath (avoiding possible
+ * bugs in bitmap code and possible bitmap index corruption).
+ */
+ if (!pack_to_stdout)
+ use_bitmap_index_default = 0;
+
+ if (use_bitmap_index < 0)
+ use_bitmap_index = use_bitmap_index_default;
+
+ /* "hard" reasons not to use bitmaps; these just won't work at all */
+ if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository))
use_bitmap_index = 0;
if (pack_to_stdout || !rev_list_all)
@@ -2775,10 +3333,34 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (progress && all_progress_implied)
progress = 2;
- prepare_packed_git();
+ add_extra_kept_packs(&keep_pack_list);
+ if (ignore_packed_keep_on_disk) {
+ struct packed_git *p;
+ for (p = get_packed_git(the_repository); p; p = p->next)
+ if (p->pack_local && p->pack_keep)
+ break;
+ if (!p) /* no keep-able packs found */
+ ignore_packed_keep_on_disk = 0;
+ }
+ if (local) {
+ /*
+ * unlike ignore_packed_keep_on_disk above, we do not
+ * want to unset "local" based on looking at packs, as
+ * it also covers non-local objects
+ */
+ struct packed_git *p;
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ if (!p->pack_local) {
+ have_non_local_packs = 1;
+ break;
+ }
+ }
+ }
+
+ prepare_packing_data(&to_pack);
if (progress)
- progress_state = start_progress(_("Counting objects"), 0);
+ progress_state = start_progress(_("Enumerating objects"), 0);
if (!use_internal_rev_list)
read_object_list_from_stdin();
else {
@@ -2796,8 +3378,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
prepare_pack(window, depth);
write_pack_file();
if (progress)
- fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
- " reused %"PRIu32" (delta %"PRIu32")\n",
- written, written_delta, reused, reused_delta);
+ fprintf_ln(stderr,
+ _("Total %"PRIu32" (delta %"PRIu32"),"
+ " reused %"PRIu32" (delta %"PRIu32")"),
+ written, written_delta, reused, reused_delta);
return 0;
}