Diffstat (limited to 'fast-import.c')
-rw-r--r--  fast-import.c  268
1 file changed, 246 insertions, 22 deletions
diff --git a/fast-import.c b/fast-import.c
index 36c5a8ebd9..2105310c54 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -19,11 +19,11 @@ Format of STDIN stream:
new_commit ::= 'commit' sp ref_str lf
mark?
- ('author' sp name sp '<' email '>' sp when lf)?
- 'committer' sp name sp '<' email '>' sp when lf
+ ('author' (sp name)? sp '<' email '>' sp when lf)?
+ 'committer' (sp name)? sp '<' email '>' sp when lf
commit_msg
- ('from' sp (ref_str | hexsha1 | sha1exp_str | idnum) lf)?
- ('merge' sp (ref_str | hexsha1 | sha1exp_str | idnum) lf)*
+ ('from' sp committish lf)?
+ ('merge' sp committish lf)*
file_change*
lf?;
commit_msg ::= data;
@@ -41,15 +41,18 @@ Format of STDIN stream:
file_obm ::= 'M' sp mode sp (hexsha1 | idnum) sp path_str lf;
file_inm ::= 'M' sp mode sp 'inline' sp path_str lf
data;
+ note_obm ::= 'N' sp (hexsha1 | idnum) sp committish lf;
+ note_inm ::= 'N' sp 'inline' sp committish lf
+ data;
new_tag ::= 'tag' sp tag_str lf
- 'from' sp (ref_str | hexsha1 | sha1exp_str | idnum) lf
- ('tagger' sp name sp '<' email '>' sp when lf)?
+ 'from' sp committish lf
+ ('tagger' (sp name)? sp '<' email '>' sp when lf)?
tag_msg;
tag_msg ::= data;
reset_branch ::= 'reset' sp ref_str lf
- ('from' sp (ref_str | hexsha1 | sha1exp_str | idnum) lf)?
+ ('from' sp committish lf)?
lf?;
checkpoint ::= 'checkpoint' lf
@@ -88,6 +91,7 @@ Format of STDIN stream:
# stream formatting is: \, " and LF. Otherwise these values
# are UTF8.
#
+ committish ::= (ref_str | hexsha1 | sha1exp_str | idnum);
ref_str ::= ref;
sha1exp_str ::= sha1exp;
tag_str ::= tag;
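
Taken together, the grammar changes above introduce the shared committish rule, make the name optional in author/committer/tagger lines, and add the two notemodify ('N') forms. A minimal stream fragment exercising the new command could look like this (marks, refs, identities and byte counts are purely illustrative; the committish after 'N' may just as well be a branch name or a SHA-1 expression):

    blob
    mark :1
    data 4
    foo

    commit refs/heads/master
    mark :2
    committer C O Mitter <committer@example.com> 1234567890 +0000
    data 8
    initial

    M 100644 :1 foo.txt

    commit refs/notes/commits
    committer C O Mitter <committer@example.com> 1234567890 +0000
    data 11
    add a note

    N inline :2
    data 16
    note for commit
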
@@ -276,6 +280,7 @@ struct recent_command
/* Configured limits on output */
static unsigned long max_depth = 10;
static off_t max_packsize = (1LL << 32) - 1;
+static uintmax_t big_file_threshold = 512 * 1024 * 1024;
static int force_update;
static int pack_compression_level = Z_DEFAULT_COMPRESSION;
static int pack_compression_seen;
@@ -999,7 +1004,7 @@ static void cycle_packfile(void)
static size_t encode_header(
enum object_type type,
- size_t size,
+ uintmax_t size,
unsigned char *hdr)
{
int n = 1;
@@ -1155,6 +1160,118 @@ static int store_object(
return 0;
}
+static void truncate_pack(off_t to)
+{
+ if (ftruncate(pack_data->pack_fd, to)
+ || lseek(pack_data->pack_fd, to, SEEK_SET) != to)
+ die_errno("cannot truncate pack to skip duplicate");
+ pack_size = to;
+}
+
+static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
+{
+ size_t in_sz = 64 * 1024, out_sz = 64 * 1024;
+ unsigned char *in_buf = xmalloc(in_sz);
+ unsigned char *out_buf = xmalloc(out_sz);
+ struct object_entry *e;
+ unsigned char sha1[20];
+ unsigned long hdrlen;
+ off_t offset;
+ git_SHA_CTX c;
+ z_stream s;
+ int status = Z_OK;
+
+ /* Determine if we should auto-checkpoint. */
+ if ((pack_size + 60 + len) > max_packsize
+ || (pack_size + 60 + len) < pack_size)
+ cycle_packfile();
+
+ offset = pack_size;
+
+ hdrlen = snprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
+ if (out_sz <= hdrlen)
+ die("impossibly large object header");
+
+ git_SHA1_Init(&c);
+ git_SHA1_Update(&c, out_buf, hdrlen);
+
+ memset(&s, 0, sizeof(s));
+ deflateInit(&s, pack_compression_level);
+
+ hdrlen = encode_header(OBJ_BLOB, len, out_buf);
+ if (out_sz <= hdrlen)
+ die("impossibly large object header");
+
+ s.next_out = out_buf + hdrlen;
+ s.avail_out = out_sz - hdrlen;
+
+ while (status != Z_STREAM_END) {
+ if (0 < len && !s.avail_in) {
+ size_t cnt = in_sz < len ? in_sz : (size_t)len;
+ size_t n = fread(in_buf, 1, cnt, stdin);
+ if (!n && feof(stdin))
+ die("EOF in data (%" PRIuMAX " bytes remaining)", len);
+
+ git_SHA1_Update(&c, in_buf, n);
+ s.next_in = in_buf;
+ s.avail_in = n;
+ len -= n;
+ }
+
+ status = deflate(&s, len ? 0 : Z_FINISH);
+
+ if (!s.avail_out || status == Z_STREAM_END) {
+ size_t n = s.next_out - out_buf;
+ write_or_die(pack_data->pack_fd, out_buf, n);
+ pack_size += n;
+ s.next_out = out_buf;
+ s.avail_out = out_sz;
+ }
+
+ switch (status) {
+ case Z_OK:
+ case Z_BUF_ERROR:
+ case Z_STREAM_END:
+ continue;
+ default:
+ die("unexpected deflate failure: %d", status);
+ }
+ }
+ deflateEnd(&s);
+ git_SHA1_Final(sha1, &c);
+
+ if (sha1out)
+ hashcpy(sha1out, sha1);
+
+ e = insert_object(sha1);
+
+ if (mark)
+ insert_mark(mark, e);
+
+ if (e->offset) {
+ duplicate_count_by_type[OBJ_BLOB]++;
+ truncate_pack(offset);
+
+ } else if (find_sha1_pack(sha1, packed_git)) {
+ e->type = OBJ_BLOB;
+ e->pack_id = MAX_PACK_ID;
+ e->offset = 1; /* just not zero! */
+ duplicate_count_by_type[OBJ_BLOB]++;
+ truncate_pack(offset);
+
+ } else {
+ e->depth = 0;
+ e->type = OBJ_BLOB;
+ e->pack_id = pack_id;
+ e->offset = offset;
+ object_count++;
+ object_count_by_type[OBJ_BLOB]++;
+ }
+
+ free(in_buf);
+ free(out_buf);
+}
+
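
The new stream_blob() above complements the existing buffered store_object() path: when the declared length of a 'data' payload exceeds big_file_threshold (512 MiB by default, per the new global above), the blob is deflated straight into the pack file in 64 KiB windows instead of being held in memory. A frontend needs no new syntax for this; a hypothetical oversized blob in the stream is enough to take the streaming path:

    blob
    mark :3
    data 734003200
    <734003200 bytes of raw file content follow>

Only the object's SHA-1 and pack offset are retained afterwards; the streamed blob is recorded with depth 0 and its contents are not kept in last_blob, so it does not serve as a delta base for later objects.
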
/* All calls must be guarded by find_object() or find_mark() to
* ensure the 'struct object_entry' passed was written by this
* process instance. We unpack the entry by the offset, avoiding
@@ -1700,7 +1817,7 @@ static void parse_mark(void)
next_mark = 0;
}
-static void parse_data(struct strbuf *sb)
+static int parse_data(struct strbuf *sb, uintmax_t limit, uintmax_t *len_res)
{
strbuf_reset(sb);
@@ -1724,9 +1841,15 @@ static void parse_data(struct strbuf *sb)
free(term);
}
else {
- size_t n = 0, length;
+ uintmax_t len = strtoumax(command_buf.buf + 5, NULL, 10);
+ size_t n = 0, length = (size_t)len;
- length = strtoul(command_buf.buf + 5, NULL, 10);
+ if (limit && limit < len) {
+ *len_res = len;
+ return 0;
+ }
+ if (length < len)
+ die("data is too large to use in this context");
while (n < length) {
size_t s = strbuf_fread(sb, length - n, stdin);
@@ -1738,6 +1861,7 @@ static void parse_data(struct strbuf *sb)
}
skip_optional_lf();
+ return 1;
}
static int validate_raw_date(const char *src, char *result, int maxlen)
@@ -1802,14 +1926,32 @@ static char *parse_ident(const char *buf)
return ident;
}
-static void parse_new_blob(void)
+static void parse_and_store_blob(
+ struct last_object *last,
+ unsigned char *sha1out,
+ uintmax_t mark)
{
static struct strbuf buf = STRBUF_INIT;
+ uintmax_t len;
+ if (parse_data(&buf, big_file_threshold, &len))
+ store_object(OBJ_BLOB, &buf, last, sha1out, mark);
+ else {
+ if (last) {
+ strbuf_release(&last->data);
+ last->offset = 0;
+ last->depth = 0;
+ }
+ stream_blob(len, sha1out, mark);
+ skip_optional_lf();
+ }
+}
+
+static void parse_new_blob(void)
+{
read_next_command();
parse_mark();
- parse_data(&buf);
- store_object(OBJ_BLOB, &buf, &last_blob, NULL, next_mark);
+ parse_and_store_blob(&last_blob, NULL, next_mark);
}
static void unload_one_branch(void)
@@ -1920,15 +2062,12 @@ static void file_change_m(struct branch *b)
* another repository.
*/
} else if (inline_data) {
- static struct strbuf buf = STRBUF_INIT;
-
if (p != uq.buf) {
strbuf_addstr(&uq, p);
p = uq.buf;
}
read_next_command();
- parse_data(&buf);
- store_object(OBJ_BLOB, &buf, &last_blob, sha1, 0);
+ parse_and_store_blob(&last_blob, sha1, 0);
} else if (oe) {
if (oe->type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
@@ -2006,6 +2145,77 @@ static void file_change_cr(struct branch *b, int rename)
leaf.tree);
}
+static void note_change_n(struct branch *b)
+{
+ const char *p = command_buf.buf + 2;
+ static struct strbuf uq = STRBUF_INIT;
+ struct object_entry *oe = oe;
+ struct branch *s;
+ unsigned char sha1[20], commit_sha1[20];
+ uint16_t inline_data = 0;
+
+ /* <dataref> or 'inline' */
+ if (*p == ':') {
+ char *x;
+ oe = find_mark(strtoumax(p + 1, &x, 10));
+ hashcpy(sha1, oe->sha1);
+ p = x;
+ } else if (!prefixcmp(p, "inline")) {
+ inline_data = 1;
+ p += 6;
+ } else {
+ if (get_sha1_hex(p, sha1))
+ die("Invalid SHA1: %s", command_buf.buf);
+ oe = find_object(sha1);
+ p += 40;
+ }
+ if (*p++ != ' ')
+ die("Missing space after SHA1: %s", command_buf.buf);
+
+ /* <committish> */
+ s = lookup_branch(p);
+ if (s) {
+ hashcpy(commit_sha1, s->sha1);
+ } else if (*p == ':') {
+ uintmax_t commit_mark = strtoumax(p + 1, NULL, 10);
+ struct object_entry *commit_oe = find_mark(commit_mark);
+ if (commit_oe->type != OBJ_COMMIT)
+ die("Mark :%" PRIuMAX " not a commit", commit_mark);
+ hashcpy(commit_sha1, commit_oe->sha1);
+ } else if (!get_sha1(p, commit_sha1)) {
+ unsigned long size;
+ char *buf = read_object_with_reference(commit_sha1,
+ commit_type, &size, commit_sha1);
+ if (!buf || size < 46)
+ die("Not a valid commit: %s", p);
+ free(buf);
+ } else
+ die("Invalid ref name or SHA1 expression: %s", p);
+
+ if (inline_data) {
+ if (p != uq.buf) {
+ strbuf_addstr(&uq, p);
+ p = uq.buf;
+ }
+ read_next_command();
+ parse_and_store_blob(&last_blob, sha1, 0);
+ } else if (oe) {
+ if (oe->type != OBJ_BLOB)
+ die("Not a blob (actually a %s): %s",
+ typename(oe->type), command_buf.buf);
+ } else {
+ enum object_type type = sha1_object_info(sha1, NULL);
+ if (type < 0)
+ die("Blob not found: %s", command_buf.buf);
+ if (type != OBJ_BLOB)
+ die("Not a blob (actually a %s): %s",
+ typename(type), command_buf.buf);
+ }
+
+ tree_content_set(&b->branch_tree, sha1_to_hex(commit_sha1), sha1,
+ S_IFREG | 0644, NULL);
+}
+
static void file_change_deleteall(struct branch *b)
{
release_tree_content_recursive(b->branch_tree.tree);
@@ -2154,7 +2364,7 @@ static void parse_new_commit(void)
}
if (!committer)
die("Expected committer but didn't get one");
- parse_data(&msg);
+ parse_data(&msg, 0, NULL);
read_next_command();
parse_from(b);
merge_list = parse_merge(&merge_count);
@@ -2175,6 +2385,8 @@ static void parse_new_commit(void)
file_change_cr(b, 1);
else if (!prefixcmp(command_buf.buf, "C "))
file_change_cr(b, 0);
+ else if (!prefixcmp(command_buf.buf, "N "))
+ note_change_n(b);
else if (!strcmp("deleteall", command_buf.buf))
file_change_deleteall(b);
else {
@@ -2273,7 +2485,7 @@ static void parse_new_tag(void)
tagger = NULL;
/* tag payload/message */
- parse_data(&msg);
+ parse_data(&msg, 0, NULL);
/* build the tag object */
strbuf_reset(&new_data);
@@ -2393,11 +2605,15 @@ static int git_pack_config(const char *k, const char *v, void *cb)
pack_compression_seen = 1;
return 0;
}
+ if (!strcmp(k, "core.bigfilethreshold")) {
+ long n = git_config_int(k, v);
+ big_file_threshold = 0 < n ? n : 0;
+ }
return git_default_config(k, v, cb);
}
static const char fast_import_usage[] =
-"git fast-import [--date-format=f] [--max-pack-size=n] [--depth=n] [--active-branches=n] [--export-marks=marks.file]";
+"git fast-import [--date-format=f] [--max-pack-size=n] [--big-file-threshold=n] [--depth=n] [--active-branches=n] [--export-marks=marks.file]";
int main(int argc, const char **argv)
{
@@ -2405,6 +2621,9 @@ int main(int argc, const char **argv)
git_extract_argv0_path(argv[0]);
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(fast_import_usage);
+
setup_git_directory();
git_config(git_pack_config, NULL);
if (!pack_compression_seen && core_compression_seen)
@@ -2435,7 +2654,12 @@ int main(int argc, const char **argv)
}
else if (!prefixcmp(a, "--max-pack-size="))
max_packsize = strtoumax(a + 16, NULL, 0) * 1024 * 1024;
- else if (!prefixcmp(a, "--depth=")) {
+ else if (!prefixcmp(a, "--big-file-threshold=")) {
+ unsigned long v;
+ if (!git_parse_ulong(a + 21, &v))
+ usage(fast_import_usage);
+ big_file_threshold = v;
+ } else if (!prefixcmp(a, "--depth=")) {
max_depth = strtoul(a + 8, NULL, 0);
if (max_depth > MAX_DEPTH)
die("--depth cannot exceed %u", MAX_DEPTH);