author	Junio C Hamano <gitster@pobox.com>	2010-02-02 21:47:51 -0800
committer	Junio C Hamano <gitster@pobox.com>	2010-02-02 21:47:51 -0800
commit	d3b91fad1843e98fde9c287f72315bd456e18f2c (patch)
tree	4655f38776fcb76c4b9470f7d2e79b9bf6938948
parent	Correct spelling of 'REUC' extension (diff)
parent	Merge branch 'sp/maint-fast-import-large-blob' into sp/fast-import-large-blob (diff)
download	tgif-d3b91fad1843e98fde9c287f72315bd456e18f2c.tar.xz
Merge branch 'sp/fast-import-large-blob'

* sp/fast-import-large-blob:
  fast-import: Stream very large blobs directly to pack
-rw-r--r--	Documentation/config.txt	14
-rw-r--r--	Documentation/git-fast-import.txt	6
-rw-r--r--	fast-import.c	174
-rwxr-xr-x	t/t5705-clone-2gb.sh	2
-rwxr-xr-x	t/t9300-fast-import.sh	46
5 files changed, 223 insertions, 19 deletions
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 17901e244a..36ad992a56 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -417,6 +417,20 @@ You probably do not need to adjust this value.
+
Common unit suffixes of 'k', 'm', or 'g' are supported.
+core.bigFileThreshold::
+ Files larger than this size are stored deflated, without
+ attempting delta compression. Storing large files without
+ delta compression avoids excessive memory usage, at the
+ slight expense of increased disk usage.
++
+Default is 512 MiB on all platforms. This should be reasonable
+for most projects as source code and other text files can still
+be delta compressed, but larger binary media files won't be.
++
+Common unit suffixes of 'k', 'm', or 'g' are supported.
++
+Currently only linkgit:git-fast-import[1] honors this setting.
+
core.excludesfile::
In addition to '.gitignore' (per-directory) and
'.git/info/exclude', git looks into this file for patterns
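For illustration, the new knob accepts the usual size suffixes; a minimal sketch of lowering it on a memory-constrained machine (the 100m value here is an arbitrary example, not from the patch):

    $ git config core.bigFileThreshold 100m
    $ git config core.bigFileThreshold
    100m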
diff --git a/Documentation/git-fast-import.txt b/Documentation/git-fast-import.txt
index fa7d2fe142..2691114440 100644
--- a/Documentation/git-fast-import.txt
+++ b/Documentation/git-fast-import.txt
@@ -50,6 +50,12 @@ OPTIONS
importers may wish to lower this, such as to ensure the
resulting packfiles fit on CDs.
+--big-file-threshold=<n>::
+ Maximum size of a blob for which fast-import will attempt
+ delta compression, expressed in megabytes (the option parser
+ below multiplies the value by 1024*1024, so the default of
+ 512 means 512 MiB). Some importers may wish to lower this
+ on systems with constrained memory.
+
--depth=<n>::
Maximum delta depth, for blob and tree deltification.
Default is 10.
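For illustration, a minimal invocation that lowers the threshold to 1 MiB (the dump file name is an assumption):

    $ git fast-import --big-file-threshold=1 <project.dump

Blobs larger than the threshold bypass the delta search and are written straight to the pack by the stream_blob() code below.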
diff --git a/fast-import.c b/fast-import.c
index 901784fe91..ca210822db 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -281,6 +281,7 @@ struct recent_command
/* Configured limits on output */
static unsigned long max_depth = 10;
static off_t max_packsize = (1LL << 32) - 1;
+static uintmax_t big_file_threshold = 512 * 1024 * 1024;
static int force_update;
static int pack_compression_level = Z_DEFAULT_COMPRESSION;
static int pack_compression_seen;
@@ -1014,7 +1015,7 @@ static void cycle_packfile(void)
static size_t encode_header(
enum object_type type,
- size_t size,
+ uintmax_t size,
unsigned char *hdr)
{
int n = 1;
@@ -1170,6 +1171,118 @@ static int store_object(
return 0;
}
+static void truncate_pack(off_t to)
+{
+ if (ftruncate(pack_data->pack_fd, to)
+ || lseek(pack_data->pack_fd, to, SEEK_SET) != to)
+ die_errno("cannot truncate pack to skip duplicate");
+ pack_size = to;
+}
+
+static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
+{
+ size_t in_sz = 64 * 1024, out_sz = 64 * 1024;
+ unsigned char *in_buf = xmalloc(in_sz);
+ unsigned char *out_buf = xmalloc(out_sz);
+ struct object_entry *e;
+ unsigned char sha1[20];
+ unsigned long hdrlen;
+ off_t offset;
+ git_SHA_CTX c;
+ z_stream s;
+ int status = Z_OK;
+
+ /* Determine if we should auto-checkpoint. */
+ if ((pack_size + 60 + len) > max_packsize
+ || (pack_size + 60 + len) < pack_size)
+ cycle_packfile();
+
+ offset = pack_size;
+
+ hdrlen = snprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
+ if (out_sz <= hdrlen)
+ die("impossibly large object header");
+
+ git_SHA1_Init(&c);
+ git_SHA1_Update(&c, out_buf, hdrlen);
+
+ memset(&s, 0, sizeof(s));
+ deflateInit(&s, pack_compression_level);
+
+ hdrlen = encode_header(OBJ_BLOB, len, out_buf);
+ if (out_sz <= hdrlen)
+ die("impossibly large object header");
+
+ s.next_out = out_buf + hdrlen;
+ s.avail_out = out_sz - hdrlen;
+
+ while (status != Z_STREAM_END) {
+ if (0 < len && !s.avail_in) {
+ size_t cnt = in_sz < len ? in_sz : (size_t)len;
+ size_t n = fread(in_buf, 1, cnt, stdin);
+ if (!n && feof(stdin))
+ die("EOF in data (%" PRIuMAX " bytes remaining)", len);
+
+ git_SHA1_Update(&c, in_buf, n);
+ s.next_in = in_buf;
+ s.avail_in = n;
+ len -= n;
+ }
+
+ status = deflate(&s, len ? Z_NO_FLUSH : Z_FINISH);
+
+ if (!s.avail_out || status == Z_STREAM_END) {
+ size_t n = s.next_out - out_buf;
+ write_or_die(pack_data->pack_fd, out_buf, n);
+ pack_size += n;
+ s.next_out = out_buf;
+ s.avail_out = out_sz;
+ }
+
+ switch (status) {
+ case Z_OK:
+ case Z_BUF_ERROR:
+ case Z_STREAM_END:
+ continue;
+ default:
+ die("unexpected deflate failure: %d", status);
+ }
+ }
+ deflateEnd(&s);
+ git_SHA1_Final(sha1, &c);
+
+ if (sha1out)
+ hashcpy(sha1out, sha1);
+
+ e = insert_object(sha1);
+
+ if (mark)
+ insert_mark(mark, e);
+
+ if (e->offset) {
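+ /* duplicate of a blob already written to this pack; rewind over it */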
+ duplicate_count_by_type[OBJ_BLOB]++;
+ truncate_pack(offset);
+
+ } else if (find_sha1_pack(sha1, packed_git)) {
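+ /* duplicate of a blob in a preexisting pack; drop the bytes we just wrote */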
+ e->type = OBJ_BLOB;
+ e->pack_id = MAX_PACK_ID;
+ e->offset = 1; /* just not zero! */
+ duplicate_count_by_type[OBJ_BLOB]++;
+ truncate_pack(offset);
+
+ } else {
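+ /* first occurrence of this blob; keep it in the current pack */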
+ e->depth = 0;
+ e->type = OBJ_BLOB;
+ e->pack_id = pack_id;
+ e->offset = offset;
+ object_count++;
+ object_count_by_type[OBJ_BLOB]++;
+ }
+
+ free(in_buf);
+ free(out_buf);
+}
+
/* All calls must be guarded by find_object() or find_mark() to
* ensure the 'struct object_entry' passed was written by this
* process instance. We unpack the entry by the offset, avoiding
@@ -1757,7 +1870,7 @@ static void parse_mark(void)
next_mark = 0;
}
-static void parse_data(struct strbuf *sb)
+static int parse_data(struct strbuf *sb, uintmax_t limit, uintmax_t *len_res)
{
strbuf_reset(sb);
@@ -1781,9 +1894,15 @@ static void parse_data(struct strbuf *sb)
free(term);
}
else {
- size_t n = 0, length;
+ uintmax_t len = strtoumax(command_buf.buf + 5, NULL, 10);
+ size_t n = 0, length = (size_t)len;
- length = strtoul(command_buf.buf + 5, NULL, 10);
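+ /* Too large to buffer in core?  Report the length so the caller can stream it. */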
+ if (limit && limit < len) {
+ *len_res = len;
+ return 0;
+ }
+ if (length < len)
+ die("data is too large to use in this context");
while (n < length) {
size_t s = strbuf_fread(sb, length - n, stdin);
@@ -1795,6 +1914,7 @@ static void parse_data(struct strbuf *sb)
}
skip_optional_lf();
+ return 1;
}
static int validate_raw_date(const char *src, char *result, int maxlen)
@@ -1859,14 +1979,32 @@ static char *parse_ident(const char *buf)
return ident;
}
-static void parse_new_blob(void)
+static void parse_and_store_blob(
+ struct last_object *last,
+ unsigned char *sha1out,
+ uintmax_t mark)
{
static struct strbuf buf = STRBUF_INIT;
+ uintmax_t len;
+ if (parse_data(&buf, big_file_threshold, &len))
+ store_object(OBJ_BLOB, &buf, last, sha1out, mark);
+ else {
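+ /* Streaming bypasses the delta cache, so invalidate the previous blob. */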
+ if (last) {
+ strbuf_release(&last->data);
+ last->offset = 0;
+ last->depth = 0;
+ }
+ stream_blob(len, sha1out, mark);
+ skip_optional_lf();
+ }
+}
+
+static void parse_new_blob(void)
+{
read_next_command();
parse_mark();
- parse_data(&buf);
- store_object(OBJ_BLOB, &buf, &last_blob, NULL, next_mark);
+ parse_and_store_blob(&last_blob, NULL, next_mark);
}
static void unload_one_branch(void)
@@ -2080,15 +2218,12 @@ static void file_change_m(struct branch *b)
* another repository.
*/
} else if (inline_data) {
- static struct strbuf buf = STRBUF_INIT;
-
if (p != uq.buf) {
strbuf_addstr(&uq, p);
p = uq.buf;
}
read_next_command();
- parse_data(&buf);
- store_object(OBJ_BLOB, &buf, &last_blob, sha1, 0);
+ parse_and_store_blob(&last_blob, sha1, 0);
} else if (oe) {
if (oe->type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
@@ -2216,15 +2351,12 @@ static void note_change_n(struct branch *b, unsigned char old_fanout)
die("Invalid ref name or SHA1 expression: %s", p);
if (inline_data) {
- static struct strbuf buf = STRBUF_INIT;
-
if (p != uq.buf) {
strbuf_addstr(&uq, p);
p = uq.buf;
}
read_next_command();
- parse_data(&buf);
- store_object(OBJ_BLOB, &buf, &last_blob, sha1, 0);
+ parse_and_store_blob(&last_blob, sha1, 0);
} else if (oe) {
if (oe->type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
@@ -2401,7 +2533,7 @@ static void parse_new_commit(void)
}
if (!committer)
die("Expected committer but didn't get one");
- parse_data(&msg);
+ parse_data(&msg, 0, NULL);
read_next_command();
parse_from(b);
merge_list = parse_merge(&merge_count);
@@ -2528,7 +2660,7 @@ static void parse_new_tag(void)
tagger = NULL;
/* tag payload/message */
- parse_data(&msg);
+ parse_data(&msg, 0, NULL);
/* build the tag object */
strbuf_reset(&new_data);
@@ -2667,6 +2799,8 @@ static int parse_one_option(const char *option)
{
if (!prefixcmp(option, "max-pack-size=")) {
option_max_pack_size(option + 14);
+ } else if (!prefixcmp(option, "big-file-threshold=")) {
+ big_file_threshold = strtoumax(option + 19, NULL, 0) * 1024 * 1024;
} else if (!prefixcmp(option, "depth=")) {
option_depth(option + 6);
} else if (!prefixcmp(option, "active-branches=")) {
@@ -2749,11 +2883,15 @@ static int git_pack_config(const char *k, const char *v, void *cb)
pack_compression_seen = 1;
return 0;
}
+ if (!strcmp(k, "core.bigfilethreshold")) {
+ long n = git_config_int(k, v);
+ big_file_threshold = 0 < n ? n : 0;
+ }
return git_default_config(k, v, cb);
}
static const char fast_import_usage[] =
-"git fast-import [--date-format=f] [--max-pack-size=n] [--depth=n] [--active-branches=n] [--export-marks=marks.file]";
+"git fast-import [--date-format=f] [--max-pack-size=n] [--big-file-threshold=n] [--depth=n] [--active-branches=n] [--export-marks=marks.file]";
static void parse_argv(void)
{
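To exercise the new streaming path by hand, one can feed fast-import a 'data' payload larger than the threshold; a minimal sketch assuming a POSIX-ish shell with 'head -c' available (repository and file names are illustrative):

    git init --bare big.git
    head -c $((2*1024*1024 + 53)) /dev/urandom >payload
    {
        echo "blob"
        echo "mark :1"
        echo "data $(wc -c <payload)"
        cat payload
        echo
    } | git --git-dir=big.git fast-import --big-file-threshold=1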
diff --git a/t/t5705-clone-2gb.sh b/t/t5705-clone-2gb.sh
index 9f52154cac..adfaae8c5b 100755
--- a/t/t5705-clone-2gb.sh
+++ b/t/t5705-clone-2gb.sh
@@ -31,7 +31,7 @@ test_expect_success 'setup' '
echo "data 5" &&
echo ">2gb" &&
cat commit) |
- git fast-import &&
+ git fast-import --big-file-threshold=2 &&
test ! -f exit-status
'
diff --git a/t/t9300-fast-import.sh b/t/t9300-fast-import.sh
index 60d6f5d1ba..131f032988 100755
--- a/t/t9300-fast-import.sh
+++ b/t/t9300-fast-import.sh
@@ -1536,4 +1536,50 @@ test_expect_success 'R: ignore non-git options' '
git fast-import <input
'
+##
+## R: very large blobs
+##
+blobsize=$((2*1024*1024 + 53))
+test-genrandom bar $blobsize >expect
+cat >input <<INPUT_END
+commit refs/heads/big-file
+committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+data <<COMMIT
+R - big file
+COMMIT
+
+M 644 inline big1
+data $blobsize
+INPUT_END
+cat expect >>input
+cat >>input <<INPUT_END
+M 644 inline big2
+data $blobsize
+INPUT_END
+cat expect >>input
+echo >>input
+
+test_expect_success \
+ 'R: blob bigger than threshold' \
+ 'test_create_repo R &&
+ git --git-dir=R/.git fast-import --big-file-threshold=1 <input'
+test_expect_success \
+ 'R: verify created pack' \
+ ': >verify &&
+ for p in R/.git/objects/pack/*.pack;
+ do
+ git verify-pack -v $p >>verify || exit;
+ done'
+test_expect_success \
+ 'R: verify written objects' \
+ 'git --git-dir=R/.git cat-file blob big-file:big1 >actual &&
+ test_cmp expect actual &&
+ a=$(git --git-dir=R/.git rev-parse big-file:big1) &&
+ b=$(git --git-dir=R/.git rev-parse big-file:big2) &&
+ test $a = $b'
+test_expect_success \
+ 'R: blob appears only once' \
+ 'n=$(grep $a verify | wc -l) &&
+ test 1 = $n'
+
test_done
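As a final manual check, 'git verify-pack -v' lists each packed object with its type, size, and delta depth, so one can confirm the streamed blob carries no delta chain; a sketch against the R repository created by the tests above:

    git verify-pack -v R/.git/objects/pack/*.pack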