Diffstat (limited to 'builtin')
-rw-r--r--  builtin/am.c                          30
-rw-r--r--  builtin/bisect--helper.c               2
-rw-r--r--  builtin/bugreport.c                  194
-rw-r--r--  builtin/checkout.c                     6
-rw-r--r--  builtin/commit.c                      30
-rw-r--r--  builtin/credential-cache--daemon.c   318
-rw-r--r--  builtin/credential-cache.c           157
-rw-r--r--  builtin/credential-store.c           195
-rw-r--r--  builtin/fast-import.c               3640
-rw-r--r--  builtin/fetch-pack.c                   4
-rw-r--r--  builtin/fetch.c                       52
-rw-r--r--  builtin/index-pack.c                  19
-rw-r--r--  builtin/merge.c                        2
-rw-r--r--  builtin/rebase.c                      47
-rw-r--r--  builtin/remote.c                       1
-rw-r--r--  builtin/repack.c                      12
-rw-r--r--  builtin/submodule--helper.c          448
-rw-r--r--  builtin/worktree.c                     8
18 files changed, 5089 insertions, 76 deletions
diff --git a/builtin/am.c b/builtin/am.c
index dd4e6c2d9b..b5c63ddf1d 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -98,6 +98,8 @@ struct am_state {
char *author_name;
char *author_email;
char *author_date;
+ char *committer_name;
+ char *committer_email;
char *msg;
size_t msg_len;
@@ -130,6 +132,8 @@ struct am_state {
*/
static void am_state_init(struct am_state *state)
{
+ const char *committer;
+ struct ident_split id;
int gpgsign;
memset(state, 0, sizeof(*state));
@@ -150,6 +154,14 @@ static void am_state_init(struct am_state *state)
if (!git_config_get_bool("commit.gpgsign", &gpgsign))
state->sign_commit = gpgsign ? "" : NULL;
+
+ committer = git_committer_info(IDENT_STRICT);
+ if (split_ident_line(&id, committer, strlen(committer)) < 0)
+ die(_("invalid committer: %s"), committer);
+ state->committer_name =
+ xmemdupz(id.name_begin, id.name_end - id.name_begin);
+ state->committer_email =
+ xmemdupz(id.mail_begin, id.mail_end - id.mail_begin);
}
/**
@@ -161,6 +173,8 @@ static void am_state_release(struct am_state *state)
free(state->author_name);
free(state->author_email);
free(state->author_date);
+ free(state->committer_name);
+ free(state->committer_email);
free(state->msg);
strvec_clear(&state->git_apply_opts);
}
@@ -1556,7 +1570,7 @@ static void do_commit(const struct am_state *state)
struct object_id tree, parent, commit;
const struct object_id *old_oid;
struct commit_list *parents = NULL;
- const char *reflog_msg, *author;
+ const char *reflog_msg, *author, *committer = NULL;
struct strbuf sb = STRBUF_INIT;
if (run_hook_le(NULL, "pre-applypatch", NULL))
@@ -1580,11 +1594,15 @@ static void do_commit(const struct am_state *state)
IDENT_STRICT);
if (state->committer_date_is_author_date)
- setenv("GIT_COMMITTER_DATE",
- state->ignore_date ? "" : state->author_date, 1);
-
- if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit,
- author, state->sign_commit))
+ committer = fmt_ident(state->committer_name,
+ state->committer_email, WANT_COMMITTER_IDENT,
+ state->ignore_date ? NULL
+ : state->author_date,
+ IDENT_STRICT);
+
+ if (commit_tree_extended(state->msg, state->msg_len, &tree, parents,
+ &commit, author, committer, state->sign_commit,
+ NULL))
die(_("failed to write commit object"));
reflog_msg = getenv("GIT_REFLOG_ACTION");
diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c
index cdda279b23..7dcc1b5188 100644
--- a/builtin/bisect--helper.c
+++ b/builtin/bisect--helper.c
@@ -27,7 +27,7 @@ static const char * const git_bisect_helper_usage[] = {
N_("git bisect--helper --bisect-check-and-set-terms <command> <good_term> <bad_term>"),
N_("git bisect--helper --bisect-next-check <good_term> <bad_term> [<term>]"),
N_("git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]"),
- N_("git bisect--helper --bisect-start [--term-{old,good}=<term> --term-{new,bad}=<term>]"
+ N_("git bisect--helper --bisect-start [--term-{new,bad}=<term> --term-{old,good}=<term>]"
" [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<paths>...]"),
NULL
};
diff --git a/builtin/bugreport.c b/builtin/bugreport.c
new file mode 100644
index 0000000000..3ad4b9b62e
--- /dev/null
+++ b/builtin/bugreport.c
@@ -0,0 +1,194 @@
+#include "builtin.h"
+#include "parse-options.h"
+#include "strbuf.h"
+#include "help.h"
+#include "compat/compiler.h"
+#include "run-command.h"
+
+
+static void get_system_info(struct strbuf *sys_info)
+{
+ struct utsname uname_info;
+ char *shell = NULL;
+
+ /* get git version from native cmd */
+ strbuf_addstr(sys_info, _("git version:\n"));
+ get_version_info(sys_info, 1);
+
+ /* system call for other version info */
+ strbuf_addstr(sys_info, "uname: ");
+ if (uname(&uname_info))
+ strbuf_addf(sys_info, _("uname() failed with error '%s' (%d)\n"),
+ strerror(errno),
+ errno);
+ else
+ strbuf_addf(sys_info, "%s %s %s %s\n",
+ uname_info.sysname,
+ uname_info.release,
+ uname_info.version,
+ uname_info.machine);
+
+ strbuf_addstr(sys_info, _("compiler info: "));
+ get_compiler_info(sys_info);
+
+ strbuf_addstr(sys_info, _("libc info: "));
+ get_libc_info(sys_info);
+
+ shell = getenv("SHELL");
+ strbuf_addf(sys_info, "$SHELL (typically, interactive shell): %s\n",
+ shell ? shell : "<unset>");
+}
+
+static void get_populated_hooks(struct strbuf *hook_info, int nongit)
+{
+ /*
+ * NEEDSWORK: Doesn't look like there is a list of all possible hooks;
+ * so below is a transcription of `git help hooks`. Later, this should
+ * be replaced with some programmatically generated list (generated from
+ * doc or else taken from some library which tells us about all the
+ * hooks)
+ */
+ static const char *hook[] = {
+ "applypatch-msg",
+ "pre-applypatch",
+ "post-applypatch",
+ "pre-commit",
+ "pre-merge-commit",
+ "prepare-commit-msg",
+ "commit-msg",
+ "post-commit",
+ "pre-rebase",
+ "post-checkout",
+ "post-merge",
+ "pre-push",
+ "pre-receive",
+ "update",
+ "post-receive",
+ "post-update",
+ "push-to-checkout",
+ "pre-auto-gc",
+ "post-rewrite",
+ "sendemail-validate",
+ "fsmonitor-watchman",
+ "p4-pre-submit",
+ "post-index-change",
+ };
+ int i;
+
+ if (nongit) {
+ strbuf_addstr(hook_info,
+ _("not run from a git repository - no hooks to show\n"));
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hook); i++)
+ if (find_hook(hook[i]))
+ strbuf_addf(hook_info, "%s\n", hook[i]);
+}
+
+static const char * const bugreport_usage[] = {
+ N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>]"),
+ NULL
+};
+
+static int get_bug_template(struct strbuf *template)
+{
+ const char template_text[] = N_(
+"Thank you for filling out a Git bug report!\n"
+"Please answer the following questions to help us understand your issue.\n"
+"\n"
+"What did you do before the bug happened? (Steps to reproduce your issue)\n"
+"\n"
+"What did you expect to happen? (Expected behavior)\n"
+"\n"
+"What happened instead? (Actual behavior)\n"
+"\n"
+"What's different between what you expected and what actually happened?\n"
+"\n"
+"Anything else you want to add:\n"
+"\n"
+"Please review the rest of the bug report below.\n"
+"You can delete any lines you don't wish to share.\n");
+
+ strbuf_addstr(template, _(template_text));
+ return 0;
+}
+
+static void get_header(struct strbuf *buf, const char *title)
+{
+ strbuf_addf(buf, "\n\n[%s]\n", title);
+}
+
+int cmd_bugreport(int argc, const char **argv, const char *prefix)
+{
+ struct strbuf buffer = STRBUF_INIT;
+ struct strbuf report_path = STRBUF_INIT;
+ int report = -1;
+ time_t now = time(NULL);
+ char *option_output = NULL;
+ char *option_suffix = "%Y-%m-%d-%H%M";
+ const char *user_relative_path = NULL;
+
+ const struct option bugreport_options[] = {
+ OPT_STRING('o', "output-directory", &option_output, N_("path"),
+ N_("specify a destination for the bugreport file")),
+ OPT_STRING('s', "suffix", &option_suffix, N_("format"),
+ N_("specify a strftime format suffix for the filename")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, bugreport_options,
+ bugreport_usage, 0);
+
+ /* Prepare the path to put the result */
+ strbuf_addstr(&report_path,
+ prefix_filename(prefix,
+ option_output ? option_output : ""));
+ strbuf_complete(&report_path, '/');
+
+ strbuf_addstr(&report_path, "git-bugreport-");
+ strbuf_addftime(&report_path, option_suffix, localtime(&now), 0, 0);
+ strbuf_addstr(&report_path, ".txt");
+
+ switch (safe_create_leading_directories(report_path.buf)) {
+ case SCLD_OK:
+ case SCLD_EXISTS:
+ break;
+ default:
+ die(_("could not create leading directories for '%s'"),
+ report_path.buf);
+ }
+
+ /* Prepare the report contents */
+ get_bug_template(&buffer);
+
+ get_header(&buffer, _("System Info"));
+ get_system_info(&buffer);
+
+ get_header(&buffer, _("Enabled Hooks"));
+ get_populated_hooks(&buffer, !startup_info->have_repository);
+
+ /* fopen doesn't offer us an O_EXCL alternative, except with glibc. */
+ report = open(report_path.buf, O_CREAT | O_EXCL | O_WRONLY, 0666);
+
+ if (report < 0)
+ die(_("couldn't create a new file at '%s'"), report_path.buf);
+
+ if (write_in_full(report, buffer.buf, buffer.len) < 0)
+ die_errno(_("unable to write to %s"), report_path.buf);
+
+ close(report);
+
+ /*
+ * We want to print the path relative to the user, but we still need the
+ * path relative to us to give to the editor.
+ */
+ if (!(prefix && skip_prefix(report_path.buf, prefix, &user_relative_path)))
+ user_relative_path = report_path.buf;
+ fprintf(stderr, _("Created new report at '%s'.\n"),
+ user_relative_path);
+
+ UNLEAK(buffer);
+ UNLEAK(report_path);
+ return !!launch_editor(report_path.buf, NULL, NULL);
+}
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 2837195491..6ec82097a2 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -1120,8 +1120,10 @@ static void setup_new_branch_info_and_source_tree(
if (!check_refname_format(new_branch_info->path, 0) &&
!read_ref(new_branch_info->path, &branch_rev))
oidcpy(rev, &branch_rev);
- else
+ else {
+ free((char *)new_branch_info->path);
new_branch_info->path = NULL; /* not an existing branch */
+ }
new_branch_info->commit = lookup_commit_reference_gently(the_repository, rev, 1);
if (!new_branch_info->commit) {
@@ -1707,6 +1709,8 @@ static int checkout_main(int argc, const char **argv, const char *prefix,
die(_("--pathspec-file-nul requires --pathspec-from-file"));
}
+ opts->pathspec.recursive = 1;
+
if (opts->pathspec.nr) {
if (1 < !!opts->writeout_stage + !!opts->force + !!opts->merge)
die(_("git checkout: --ours/--theirs, --force and --merge are incompatible when\n"
diff --git a/builtin/commit.c b/builtin/commit.c
index 69ac78d5e5..5d91b13a5c 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -847,21 +847,19 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
!merge_contains_scissors)
wt_status_add_cut_line(s->fp);
- status_printf_ln(s, GIT_COLOR_NORMAL,
- whence == FROM_MERGE
- ? _("\n"
- "It looks like you may be committing a merge.\n"
- "If this is not correct, please remove the file\n"
- " %s\n"
- "and try again.\n")
- : _("\n"
- "It looks like you may be committing a cherry-pick.\n"
- "If this is not correct, please remove the file\n"
- " %s\n"
- "and try again.\n"),
+ status_printf_ln(
+ s, GIT_COLOR_NORMAL,
whence == FROM_MERGE ?
- git_path_merge_head(the_repository) :
- git_path_cherry_pick_head(the_repository));
+ _("\n"
+ "It looks like you may be committing a merge.\n"
+ "If this is not correct, please run\n"
+ " git update-ref -d MERGE_HEAD\n"
+ "and try again.\n") :
+ _("\n"
+ "It looks like you may be committing a cherry-pick.\n"
+ "If this is not correct, please run\n"
+ " git update-ref -d CHERRY_PICK_HEAD\n"
+ "and try again.\n"));
}
fprintf(s->fp, "\n");
@@ -1674,8 +1672,8 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
}
if (commit_tree_extended(sb.buf, sb.len, &active_cache_tree->oid,
- parents, &oid, author_ident.buf, sign_commit,
- extra)) {
+ parents, &oid, author_ident.buf, NULL,
+ sign_commit, extra)) {
rollback_index_files();
die(_("failed to write commit object"));
}
diff --git a/builtin/credential-cache--daemon.c b/builtin/credential-cache--daemon.c
new file mode 100644
index 0000000000..c61f123a3b
--- /dev/null
+++ b/builtin/credential-cache--daemon.c
@@ -0,0 +1,318 @@
+#include "builtin.h"
+#include "parse-options.h"
+
+#ifndef NO_UNIX_SOCKETS
+
+#include "config.h"
+#include "tempfile.h"
+#include "credential.h"
+#include "unix-socket.h"
+
+struct credential_cache_entry {
+ struct credential item;
+ timestamp_t expiration;
+};
+static struct credential_cache_entry *entries;
+static int entries_nr;
+static int entries_alloc;
+
+static void cache_credential(struct credential *c, int timeout)
+{
+ struct credential_cache_entry *e;
+
+ ALLOC_GROW(entries, entries_nr + 1, entries_alloc);
+ e = &entries[entries_nr++];
+
+ /* take ownership of pointers */
+ memcpy(&e->item, c, sizeof(*c));
+ memset(c, 0, sizeof(*c));
+ e->expiration = time(NULL) + timeout;
+}
+
+static struct credential_cache_entry *lookup_credential(const struct credential *c)
+{
+ int i;
+ for (i = 0; i < entries_nr; i++) {
+ struct credential *e = &entries[i].item;
+ if (credential_match(c, e))
+ return &entries[i];
+ }
+ return NULL;
+}
+
+static void remove_credential(const struct credential *c)
+{
+ struct credential_cache_entry *e;
+
+ e = lookup_credential(c);
+ if (e)
+ e->expiration = 0;
+}
+
+static timestamp_t check_expirations(void)
+{
+ static timestamp_t wait_for_entry_until;
+ int i = 0;
+ timestamp_t now = time(NULL);
+ timestamp_t next = TIME_MAX;
+
+ /*
+ * Initially give the client 30 seconds to actually contact us
+ * and store a credential before we decide there's no point in
+ * keeping the daemon around.
+ */
+ if (!wait_for_entry_until)
+ wait_for_entry_until = now + 30;
+
+ while (i < entries_nr) {
+ if (entries[i].expiration <= now) {
+ entries_nr--;
+ credential_clear(&entries[i].item);
+ if (i != entries_nr)
+ memcpy(&entries[i], &entries[entries_nr], sizeof(*entries));
+ /*
+ * Stick around 30 seconds in case a new credential
+ * shows up (e.g., because we just removed a failed
+ * one, and we will soon get the correct one).
+ */
+ wait_for_entry_until = now + 30;
+ }
+ else {
+ if (entries[i].expiration < next)
+ next = entries[i].expiration;
+ i++;
+ }
+ }
+
+ if (!entries_nr) {
+ if (wait_for_entry_until <= now)
+ return 0;
+ next = wait_for_entry_until;
+ }
+
+ return next - now;
+}
+
+static int read_request(FILE *fh, struct credential *c,
+ struct strbuf *action, int *timeout)
+{
+ static struct strbuf item = STRBUF_INIT;
+ const char *p;
+
+ strbuf_getline_lf(&item, fh);
+ if (!skip_prefix(item.buf, "action=", &p))
+ return error("client sent bogus action line: %s", item.buf);
+ strbuf_addstr(action, p);
+
+ strbuf_getline_lf(&item, fh);
+ if (!skip_prefix(item.buf, "timeout=", &p))
+ return error("client sent bogus timeout line: %s", item.buf);
+ *timeout = atoi(p);
+
+ if (credential_read(c, fh) < 0)
+ return -1;
+ return 0;
+}
+
+static void serve_one_client(FILE *in, FILE *out)
+{
+ struct credential c = CREDENTIAL_INIT;
+ struct strbuf action = STRBUF_INIT;
+ int timeout = -1;
+
+ if (read_request(in, &c, &action, &timeout) < 0)
+ /* ignore error */ ;
+ else if (!strcmp(action.buf, "get")) {
+ struct credential_cache_entry *e = lookup_credential(&c);
+ if (e) {
+ fprintf(out, "username=%s\n", e->item.username);
+ fprintf(out, "password=%s\n", e->item.password);
+ }
+ }
+ else if (!strcmp(action.buf, "exit")) {
+ /*
+ * It's important that we clean up our socket first, and then
+ * signal the client only once we have finished the cleanup.
+ * Calling exit() directly does this, because we clean up in
+ * our atexit() handler, and then signal the client when our
+ * process actually ends, which closes the socket and gives
+ * them EOF.
+ */
+ exit(0);
+ }
+ else if (!strcmp(action.buf, "erase"))
+ remove_credential(&c);
+ else if (!strcmp(action.buf, "store")) {
+ if (timeout < 0)
+ warning("cache client didn't specify a timeout");
+ else if (!c.username || !c.password)
+ warning("cache client gave us a partial credential");
+ else {
+ remove_credential(&c);
+ cache_credential(&c, timeout);
+ }
+ }
+ else
+ warning("cache client sent unknown action: %s", action.buf);
+
+ credential_clear(&c);
+ strbuf_release(&action);
+}
+
+static int serve_cache_loop(int fd)
+{
+ struct pollfd pfd;
+ timestamp_t wakeup;
+
+ wakeup = check_expirations();
+ if (!wakeup)
+ return 0;
+
+ pfd.fd = fd;
+ pfd.events = POLLIN;
+ if (poll(&pfd, 1, 1000 * wakeup) < 0) {
+ if (errno != EINTR)
+ die_errno("poll failed");
+ return 1;
+ }
+
+ if (pfd.revents & POLLIN) {
+ int client, client2;
+ FILE *in, *out;
+
+ client = accept(fd, NULL, NULL);
+ if (client < 0) {
+ warning_errno("accept failed");
+ return 1;
+ }
+ client2 = dup(client);
+ if (client2 < 0) {
+ warning_errno("dup failed");
+ close(client);
+ return 1;
+ }
+
+ in = xfdopen(client, "r");
+ out = xfdopen(client2, "w");
+ serve_one_client(in, out);
+ fclose(in);
+ fclose(out);
+ }
+ return 1;
+}
+
+static void serve_cache(const char *socket_path, int debug)
+{
+ int fd;
+
+ fd = unix_stream_listen(socket_path);
+ if (fd < 0)
+ die_errno("unable to bind to '%s'", socket_path);
+
+ printf("ok\n");
+ fclose(stdout);
+ if (!debug) {
+ if (!freopen("/dev/null", "w", stderr))
+ die_errno("unable to point stderr to /dev/null");
+ }
+
+ while (serve_cache_loop(fd))
+ ; /* nothing */
+
+ close(fd);
+}
+
+static const char permissions_advice[] = N_(
+"The permissions on your socket directory are too loose; other\n"
+"users may be able to read your cached credentials. Consider running:\n"
+"\n"
+" chmod 0700 %s");
+static void init_socket_directory(const char *path)
+{
+ struct stat st;
+ char *path_copy = xstrdup(path);
+ char *dir = dirname(path_copy);
+
+ if (!stat(dir, &st)) {
+ if (st.st_mode & 077)
+ die(_(permissions_advice), dir);
+ } else {
+ /*
+ * We must be sure to create the directory with the correct mode,
+ * not just chmod it after the fact; otherwise, there is a race
+ * condition in which somebody can chdir to it, sleep, then try to open
+ * our protected socket.
+ */
+ if (safe_create_leading_directories_const(dir) < 0)
+ die_errno("unable to create directories for '%s'", dir);
+ if (mkdir(dir, 0700) < 0)
+ die_errno("unable to mkdir '%s'", dir);
+ }
+
+ if (chdir(dir))
+ /*
+ * We don't actually care what our cwd is; we chdir here just to
+ * be a friendly daemon and avoid tying up our original cwd.
+ * If this fails, it's OK to just continue without that benefit.
+ */
+ ;
+
+ free(path_copy);
+}
+
+int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
+{
+ struct tempfile *socket_file;
+ const char *socket_path;
+ int ignore_sighup = 0;
+ static const char *usage[] = {
+ "git-credential-cache--daemon [opts] <socket_path>",
+ NULL
+ };
+ int debug = 0;
+ const struct option options[] = {
+ OPT_BOOL(0, "debug", &debug,
+ N_("print debugging messages to stderr")),
+ OPT_END()
+ };
+
+ git_config_get_bool("credentialcache.ignoresighup", &ignore_sighup);
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ socket_path = argv[0];
+
+ if (!socket_path)
+ usage_with_options(usage, options);
+
+ if (!is_absolute_path(socket_path))
+ die("socket directory must be an absolute path");
+
+ init_socket_directory(socket_path);
+ socket_file = register_tempfile(socket_path);
+
+ if (ignore_sighup)
+ signal(SIGHUP, SIG_IGN);
+
+ serve_cache(socket_path, debug);
+ delete_tempfile(&socket_file);
+
+ return 0;
+}
+
+#else
+
+int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
+{
+ const char * const usage[] = {
+ "git credential-cache--daemon [options] <action>",
+ "",
+ "credential-cache--daemon is disabled in this build of Git",
+ NULL
+ };
+ struct option options[] = { OPT_END() };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ die(_("credential-cache--daemon unavailable; no unix socket support"));
+}
+
+#endif /* NO_UNIX_SOCKETS */
diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c
new file mode 100644
index 0000000000..9b3f709905
--- /dev/null
+++ b/builtin/credential-cache.c
@@ -0,0 +1,157 @@
+#include "builtin.h"
+#include "parse-options.h"
+
+#ifndef NO_UNIX_SOCKETS
+
+#include "credential.h"
+#include "string-list.h"
+#include "unix-socket.h"
+#include "run-command.h"
+
+#define FLAG_SPAWN 0x1
+#define FLAG_RELAY 0x2
+
+static int send_request(const char *socket, const struct strbuf *out)
+{
+ int got_data = 0;
+ int fd = unix_stream_connect(socket);
+
+ if (fd < 0)
+ return -1;
+
+ if (write_in_full(fd, out->buf, out->len) < 0)
+ die_errno("unable to write to cache daemon");
+ shutdown(fd, SHUT_WR);
+
+ while (1) {
+ char in[1024];
+ int r;
+
+ r = read_in_full(fd, in, sizeof(in));
+ if (r == 0 || (r < 0 && errno == ECONNRESET))
+ break;
+ if (r < 0)
+ die_errno("read error from cache daemon");
+ write_or_die(1, in, r);
+ got_data = 1;
+ }
+ close(fd);
+ return got_data;
+}
+
+static void spawn_daemon(const char *socket)
+{
+ struct child_process daemon = CHILD_PROCESS_INIT;
+ char buf[128];
+ int r;
+
+ strvec_pushl(&daemon.args,
+ "credential-cache--daemon", socket,
+ NULL);
+ daemon.git_cmd = 1;
+ daemon.no_stdin = 1;
+ daemon.out = -1;
+
+ if (start_command(&daemon))
+ die_errno("unable to start cache daemon");
+ r = read_in_full(daemon.out, buf, sizeof(buf));
+ if (r < 0)
+ die_errno("unable to read result code from cache daemon");
+ if (r != 3 || memcmp(buf, "ok\n", 3))
+ die("cache daemon did not start: %.*s", r, buf);
+ close(daemon.out);
+}
+
+static void do_cache(const char *socket, const char *action, int timeout,
+ int flags)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "action=%s\n", action);
+ strbuf_addf(&buf, "timeout=%d\n", timeout);
+ if (flags & FLAG_RELAY) {
+ if (strbuf_read(&buf, 0, 0) < 0)
+ die_errno("unable to relay credential");
+ }
+
+ if (send_request(socket, &buf) < 0) {
+ if (errno != ENOENT && errno != ECONNREFUSED)
+ die_errno("unable to connect to cache daemon");
+ if (flags & FLAG_SPAWN) {
+ spawn_daemon(socket);
+ if (send_request(socket, &buf) < 0)
+ die_errno("unable to connect to cache daemon");
+ }
+ }
+ strbuf_release(&buf);
+}
+
+static char *get_socket_path(void)
+{
+ struct stat sb;
+ char *old_dir, *socket;
+ old_dir = expand_user_path("~/.git-credential-cache", 0);
+ if (old_dir && !stat(old_dir, &sb) && S_ISDIR(sb.st_mode))
+ socket = xstrfmt("%s/socket", old_dir);
+ else
+ socket = xdg_cache_home("credential/socket");
+ free(old_dir);
+ return socket;
+}
+
+int cmd_credential_cache(int argc, const char **argv, const char *prefix)
+{
+ char *socket_path = NULL;
+ int timeout = 900;
+ const char *op;
+ const char * const usage[] = {
+ "git credential-cache [<options>] <action>",
+ NULL
+ };
+ struct option options[] = {
+ OPT_INTEGER(0, "timeout", &timeout,
+ "number of seconds to cache credentials"),
+ OPT_STRING(0, "socket", &socket_path, "path",
+ "path of cache-daemon socket"),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ if (!argc)
+ usage_with_options(usage, options);
+ op = argv[0];
+
+ if (!socket_path)
+ socket_path = get_socket_path();
+ if (!socket_path)
+ die("unable to find a suitable socket path; use --socket");
+
+ if (!strcmp(op, "exit"))
+ do_cache(socket_path, op, timeout, 0);
+ else if (!strcmp(op, "get") || !strcmp(op, "erase"))
+ do_cache(socket_path, op, timeout, FLAG_RELAY);
+ else if (!strcmp(op, "store"))
+ do_cache(socket_path, op, timeout, FLAG_RELAY|FLAG_SPAWN);
+ else
+ ; /* ignore unknown operation */
+
+ return 0;
+}
+
+#else
+
+int cmd_credential_cache(int argc, const char **argv, const char *prefix)
+{
+ const char * const usage[] = {
+ "git credential-cache [options] <action>",
+ "",
+ "credential-cache is disabled in this build of Git",
+ NULL
+ };
+ struct option options[] = { OPT_END() };
+
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ die(_("credential-cache unavailable; no unix socket support"));
+}
+
+#endif /* NO_UNIX_SOCKETS */
diff --git a/builtin/credential-store.c b/builtin/credential-store.c
new file mode 100644
index 0000000000..5331ab151a
--- /dev/null
+++ b/builtin/credential-store.c
@@ -0,0 +1,195 @@
+#include "builtin.h"
+#include "lockfile.h"
+#include "credential.h"
+#include "string-list.h"
+#include "parse-options.h"
+
+static struct lock_file credential_lock;
+
+static int parse_credential_file(const char *fn,
+ struct credential *c,
+ void (*match_cb)(struct credential *),
+ void (*other_cb)(struct strbuf *))
+{
+ FILE *fh;
+ struct strbuf line = STRBUF_INIT;
+ struct credential entry = CREDENTIAL_INIT;
+ int found_credential = 0;
+
+ fh = fopen(fn, "r");
+ if (!fh) {
+ if (errno != ENOENT && errno != EACCES)
+ die_errno("unable to open %s", fn);
+ return found_credential;
+ }
+
+ while (strbuf_getline_lf(&line, fh) != EOF) {
+ if (!credential_from_url_gently(&entry, line.buf, 1) &&
+ entry.username && entry.password &&
+ credential_match(c, &entry)) {
+ found_credential = 1;
+ if (match_cb) {
+ match_cb(&entry);
+ break;
+ }
+ }
+ else if (other_cb)
+ other_cb(&line);
+ }
+
+ credential_clear(&entry);
+ strbuf_release(&line);
+ fclose(fh);
+ return found_credential;
+}
+
+static void print_entry(struct credential *c)
+{
+ printf("username=%s\n", c->username);
+ printf("password=%s\n", c->password);
+}
+
+static void print_line(struct strbuf *buf)
+{
+ strbuf_addch(buf, '\n');
+ write_or_die(get_lock_file_fd(&credential_lock), buf->buf, buf->len);
+}
+
+static void rewrite_credential_file(const char *fn, struct credential *c,
+ struct strbuf *extra)
+{
+ if (hold_lock_file_for_update(&credential_lock, fn, 0) < 0)
+ die_errno("unable to get credential storage lock");
+ if (extra)
+ print_line(extra);
+ parse_credential_file(fn, c, NULL, print_line);
+ if (commit_lock_file(&credential_lock) < 0)
+ die_errno("unable to write credential store");
+}
+
+static void store_credential_file(const char *fn, struct credential *c)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "%s://", c->protocol);
+ strbuf_addstr_urlencode(&buf, c->username, is_rfc3986_unreserved);
+ strbuf_addch(&buf, ':');
+ strbuf_addstr_urlencode(&buf, c->password, is_rfc3986_unreserved);
+ strbuf_addch(&buf, '@');
+ if (c->host)
+ strbuf_addstr_urlencode(&buf, c->host, is_rfc3986_unreserved);
+ if (c->path) {
+ strbuf_addch(&buf, '/');
+ strbuf_addstr_urlencode(&buf, c->path,
+ is_rfc3986_reserved_or_unreserved);
+ }
+
+ rewrite_credential_file(fn, c, &buf);
+ strbuf_release(&buf);
+}
+
+static void store_credential(const struct string_list *fns, struct credential *c)
+{
+ struct string_list_item *fn;
+
+ /*
+ * Sanity check that what we are storing is actually sensible.
+ * In particular, we can't make a URL without a protocol field.
+ * Without either a host or pathname (depending on the scheme),
+ * we have no primary key. And without a username and password,
+ * we are not actually storing a credential.
+ */
+ if (!c->protocol || !(c->host || c->path) || !c->username || !c->password)
+ return;
+
+ for_each_string_list_item(fn, fns)
+ if (!access(fn->string, F_OK)) {
+ store_credential_file(fn->string, c);
+ return;
+ }
+ /*
+ * Write credential to the filename specified by fns->items[0], thus
+ * creating it
+ */
+ if (fns->nr)
+ store_credential_file(fns->items[0].string, c);
+}
+
+static void remove_credential(const struct string_list *fns, struct credential *c)
+{
+ struct string_list_item *fn;
+
+ /*
+ * Sanity check that we actually have something to match
+ * against. The input we get is a restrictive pattern,
+ * so technically a blank credential means "erase everything".
+ * But it is too easy to accidentally send this, since it is equivalent
+ * to empty input. So explicitly disallow it, and require that the
+ * pattern have some actual content to match.
+ */
+ if (!c->protocol && !c->host && !c->path && !c->username)
+ return;
+ for_each_string_list_item(fn, fns)
+ if (!access(fn->string, F_OK))
+ rewrite_credential_file(fn->string, c, NULL);
+}
+
+static void lookup_credential(const struct string_list *fns, struct credential *c)
+{
+ struct string_list_item *fn;
+
+ for_each_string_list_item(fn, fns)
+ if (parse_credential_file(fn->string, c, print_entry, NULL))
+ return; /* Found credential */
+}
+
+int cmd_credential_store(int argc, const char **argv, const char *prefix)
+{
+ const char * const usage[] = {
+ "git credential-store [<options>] <action>",
+ NULL
+ };
+ const char *op;
+ struct credential c = CREDENTIAL_INIT;
+ struct string_list fns = STRING_LIST_INIT_DUP;
+ char *file = NULL;
+ struct option options[] = {
+ OPT_STRING(0, "file", &file, "path",
+ "fetch and store credentials in <path>"),
+ OPT_END()
+ };
+
+ umask(077);
+
+ argc = parse_options(argc, (const char **)argv, prefix, options, usage, 0);
+ if (argc != 1)
+ usage_with_options(usage, options);
+ op = argv[0];
+
+ if (file) {
+ string_list_append(&fns, file);
+ } else {
+ if ((file = expand_user_path("~/.git-credentials", 0)))
+ string_list_append_nodup(&fns, file);
+ file = xdg_config_home("credentials");
+ if (file)
+ string_list_append_nodup(&fns, file);
+ }
+ if (!fns.nr)
+ die("unable to set up default path; use --file");
+
+ if (credential_read(&c, stdin) < 0)
+ die("unable to read credential");
+
+ if (!strcmp(op, "get"))
+ lookup_credential(&fns, &c);
+ else if (!strcmp(op, "erase"))
+ remove_credential(&fns, &c);
+ else if (!strcmp(op, "store"))
+ store_credential(&fns, &c);
+ else
+ ; /* Ignore unknown operation. */
+
+ string_list_clear(&fns, 0);
+ return 0;
+}
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
new file mode 100644
index 0000000000..1c85eafe43
--- /dev/null
+++ b/builtin/fast-import.c
@@ -0,0 +1,3640 @@
+#include "builtin.h"
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "lockfile.h"
+#include "object.h"
+#include "blob.h"
+#include "tree.h"
+#include "commit.h"
+#include "delta.h"
+#include "pack.h"
+#include "refs.h"
+#include "csum-file.h"
+#include "quote.h"
+#include "dir.h"
+#include "run-command.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "mem-pool.h"
+#include "commit-reach.h"
+#include "khash.h"
+
+#define PACK_ID_BITS 16
+#define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
+#define DEPTH_BITS 13
+#define MAX_DEPTH ((1<<DEPTH_BITS)-1)
+
+/*
+ * We abuse the setuid bit on directories to mean "do not delta".
+ */
+#define NO_DELTA S_ISUID
+
+/*
+ * The amount of additional space required in order to write an object into the
+ * current pack. This is the hash lengths at the end of the pack, plus the
+ * length of one object ID.
+ */
+#define PACK_SIZE_THRESHOLD (the_hash_algo->rawsz * 3)
+
+struct object_entry {
+ struct pack_idx_entry idx;
+ struct hashmap_entry ent;
+ uint32_t type : TYPE_BITS,
+ pack_id : PACK_ID_BITS,
+ depth : DEPTH_BITS;
+};
+
+static int object_entry_hashcmp(const void *map_data,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
+ const void *keydata)
+{
+ const struct object_id *oid = keydata;
+ const struct object_entry *e1, *e2;
+
+ e1 = container_of(eptr, const struct object_entry, ent);
+ if (oid)
+ return oidcmp(&e1->idx.oid, oid);
+
+ e2 = container_of(entry_or_key, const struct object_entry, ent);
+ return oidcmp(&e1->idx.oid, &e2->idx.oid);
+}
+
+struct object_entry_pool {
+ struct object_entry_pool *next_pool;
+ struct object_entry *next_free;
+ struct object_entry *end;
+ struct object_entry entries[FLEX_ARRAY]; /* more */
+};
+
+struct mark_set {
+ union {
+ struct object_id *oids[1024];
+ struct object_entry *marked[1024];
+ struct mark_set *sets[1024];
+ } data;
+ unsigned int shift;
+};
+
+struct last_object {
+ struct strbuf data;
+ off_t offset;
+ unsigned int depth;
+ unsigned no_swap : 1;
+};
+
+struct atom_str {
+ struct atom_str *next_atom;
+ unsigned short str_len;
+ char str_dat[FLEX_ARRAY]; /* more */
+};
+
+struct tree_content;
+struct tree_entry {
+ struct tree_content *tree;
+ struct atom_str *name;
+ struct tree_entry_ms {
+ uint16_t mode;
+ struct object_id oid;
+ } versions[2];
+};
+
+struct tree_content {
+ unsigned int entry_capacity; /* must match avail_tree_content */
+ unsigned int entry_count;
+ unsigned int delta_depth;
+ struct tree_entry *entries[FLEX_ARRAY]; /* more */
+};
+
+struct avail_tree_content {
+ unsigned int entry_capacity; /* must match tree_content */
+ struct avail_tree_content *next_avail;
+};
+
+struct branch {
+ struct branch *table_next_branch;
+ struct branch *active_next_branch;
+ const char *name;
+ struct tree_entry branch_tree;
+ uintmax_t last_commit;
+ uintmax_t num_notes;
+ unsigned active : 1;
+ unsigned delete : 1;
+ unsigned pack_id : PACK_ID_BITS;
+ struct object_id oid;
+};
+
+struct tag {
+ struct tag *next_tag;
+ const char *name;
+ unsigned int pack_id;
+ struct object_id oid;
+};
+
+struct hash_list {
+ struct hash_list *next;
+ struct object_id oid;
+};
+
+typedef enum {
+ WHENSPEC_RAW = 1,
+ WHENSPEC_RAW_PERMISSIVE,
+ WHENSPEC_RFC2822,
+ WHENSPEC_NOW
+} whenspec_type;
+
+struct recent_command {
+ struct recent_command *prev;
+ struct recent_command *next;
+ char *buf;
+};
+
+typedef void (*mark_set_inserter_t)(struct mark_set *s, struct object_id *oid, uintmax_t mark);
+typedef void (*each_mark_fn_t)(uintmax_t mark, void *obj, void *cbp);
+
+/* Configured limits on output */
+static unsigned long max_depth = 50;
+static off_t max_packsize;
+static int unpack_limit = 100;
+static int force_update;
+
+/* Stats and misc. counters */
+static uintmax_t alloc_count;
+static uintmax_t marks_set_count;
+static uintmax_t object_count_by_type[1 << TYPE_BITS];
+static uintmax_t duplicate_count_by_type[1 << TYPE_BITS];
+static uintmax_t delta_count_by_type[1 << TYPE_BITS];
+static uintmax_t delta_count_attempts_by_type[1 << TYPE_BITS];
+static unsigned long object_count;
+static unsigned long branch_count;
+static unsigned long branch_load_count;
+static int failure;
+static FILE *pack_edges;
+static unsigned int show_stats = 1;
+static int global_argc;
+static const char **global_argv;
+
+/* Memory pools */
+static struct mem_pool fi_mem_pool = {NULL, 2*1024*1024 -
+ sizeof(struct mp_block), 0 };
+
+/* Atom management */
+static unsigned int atom_table_sz = 4451;
+static unsigned int atom_cnt;
+static struct atom_str **atom_table;
+
+/* The .pack file being generated */
+static struct pack_idx_option pack_idx_opts;
+static unsigned int pack_id;
+static struct hashfile *pack_file;
+static struct packed_git *pack_data;
+static struct packed_git **all_packs;
+static off_t pack_size;
+
+/* Table of objects we've written. */
+static unsigned int object_entry_alloc = 5000;
+static struct object_entry_pool *blocks;
+static struct hashmap object_table;
+static struct mark_set *marks;
+static const char *export_marks_file;
+static const char *import_marks_file;
+static int import_marks_file_from_stream;
+static int import_marks_file_ignore_missing;
+static int import_marks_file_done;
+static int relative_marks_paths;
+
+/* Our last blob */
+static struct last_object last_blob = { STRBUF_INIT, 0, 0, 0 };
+
+/* Tree management */
+static unsigned int tree_entry_alloc = 1000;
+static void *avail_tree_entry;
+static unsigned int avail_tree_table_sz = 100;
+static struct avail_tree_content **avail_tree_table;
+static size_t tree_entry_allocd;
+static struct strbuf old_tree = STRBUF_INIT;
+static struct strbuf new_tree = STRBUF_INIT;
+
+/* Branch data */
+static unsigned long max_active_branches = 5;
+static unsigned long cur_active_branches;
+static unsigned long branch_table_sz = 1039;
+static struct branch **branch_table;
+static struct branch *active_branches;
+
+/* Tag data */
+static struct tag *first_tag;
+static struct tag *last_tag;
+
+/* Input stream parsing */
+static whenspec_type whenspec = WHENSPEC_RAW;
+static struct strbuf command_buf = STRBUF_INIT;
+static int unread_command_buf;
+static struct recent_command cmd_hist = {&cmd_hist, &cmd_hist, NULL};
+static struct recent_command *cmd_tail = &cmd_hist;
+static struct recent_command *rc_free;
+static unsigned int cmd_save = 100;
+static uintmax_t next_mark;
+static struct strbuf new_data = STRBUF_INIT;
+static int seen_data_command;
+static int require_explicit_termination;
+static int allow_unsafe_features;
+
+/* Signal handling */
+static volatile sig_atomic_t checkpoint_requested;
+
+/* Submodule marks */
+static struct string_list sub_marks_from = STRING_LIST_INIT_DUP;
+static struct string_list sub_marks_to = STRING_LIST_INIT_DUP;
+static kh_oid_map_t *sub_oid_map;
+
+/* Where to write output of cat-blob commands */
+static int cat_blob_fd = STDOUT_FILENO;
+
+static void parse_argv(void);
+static void parse_get_mark(const char *p);
+static void parse_cat_blob(const char *p);
+static void parse_ls(const char *p, struct branch *b);
+
+static void for_each_mark(struct mark_set *m, uintmax_t base, each_mark_fn_t callback, void *p)
+{
+ uintmax_t k;
+ if (m->shift) {
+ for (k = 0; k < 1024; k++) {
+ if (m->data.sets[k])
+ for_each_mark(m->data.sets[k], base + (k << m->shift), callback, p);
+ }
+ } else {
+ for (k = 0; k < 1024; k++) {
+ if (m->data.marked[k])
+ callback(base + k, m->data.marked[k], p);
+ }
+ }
+}
+
+static void dump_marks_fn(uintmax_t mark, void *object, void *cbp) {
+ struct object_entry *e = object;
+ FILE *f = cbp;
+
+ fprintf(f, ":%" PRIuMAX " %s\n", mark, oid_to_hex(&e->idx.oid));
+}
+
+static void write_branch_report(FILE *rpt, struct branch *b)
+{
+ fprintf(rpt, "%s:\n", b->name);
+
+ fprintf(rpt, " status :");
+ if (b->active)
+ fputs(" active", rpt);
+ if (b->branch_tree.tree)
+ fputs(" loaded", rpt);
+ if (is_null_oid(&b->branch_tree.versions[1].oid))
+ fputs(" dirty", rpt);
+ fputc('\n', rpt);
+
+ fprintf(rpt, " tip commit : %s\n", oid_to_hex(&b->oid));
+ fprintf(rpt, " old tree : %s\n",
+ oid_to_hex(&b->branch_tree.versions[0].oid));
+ fprintf(rpt, " cur tree : %s\n",
+ oid_to_hex(&b->branch_tree.versions[1].oid));
+ fprintf(rpt, " commit clock: %" PRIuMAX "\n", b->last_commit);
+
+ fputs(" last pack : ", rpt);
+ if (b->pack_id < MAX_PACK_ID)
+ fprintf(rpt, "%u", b->pack_id);
+ fputc('\n', rpt);
+
+ fputc('\n', rpt);
+}
+
+static void write_crash_report(const char *err)
+{
+ char *loc = git_pathdup("fast_import_crash_%"PRIuMAX, (uintmax_t) getpid());
+ FILE *rpt = fopen(loc, "w");
+ struct branch *b;
+ unsigned long lu;
+ struct recent_command *rc;
+
+ if (!rpt) {
+ error_errno("can't write crash report %s", loc);
+ free(loc);
+ return;
+ }
+
+ fprintf(stderr, "fast-import: dumping crash report to %s\n", loc);
+
+ fprintf(rpt, "fast-import crash report:\n");
+ fprintf(rpt, " fast-import process: %"PRIuMAX"\n", (uintmax_t) getpid());
+ fprintf(rpt, " parent process : %"PRIuMAX"\n", (uintmax_t) getppid());
+ fprintf(rpt, " at %s\n", show_date(time(NULL), 0, DATE_MODE(ISO8601)));
+ fputc('\n', rpt);
+
+ fputs("fatal: ", rpt);
+ fputs(err, rpt);
+ fputc('\n', rpt);
+
+ fputc('\n', rpt);
+ fputs("Most Recent Commands Before Crash\n", rpt);
+ fputs("---------------------------------\n", rpt);
+ for (rc = cmd_hist.next; rc != &cmd_hist; rc = rc->next) {
+ if (rc->next == &cmd_hist)
+ fputs("* ", rpt);
+ else
+ fputs(" ", rpt);
+ fputs(rc->buf, rpt);
+ fputc('\n', rpt);
+ }
+
+ fputc('\n', rpt);
+ fputs("Active Branch LRU\n", rpt);
+ fputs("-----------------\n", rpt);
+ fprintf(rpt, " active_branches = %lu cur, %lu max\n",
+ cur_active_branches,
+ max_active_branches);
+ fputc('\n', rpt);
+ fputs(" pos clock name\n", rpt);
+ fputs(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", rpt);
+ for (b = active_branches, lu = 0; b; b = b->active_next_branch)
+ fprintf(rpt, " %2lu) %6" PRIuMAX" %s\n",
+ ++lu, b->last_commit, b->name);
+
+ fputc('\n', rpt);
+ fputs("Inactive Branches\n", rpt);
+ fputs("-----------------\n", rpt);
+ for (lu = 0; lu < branch_table_sz; lu++) {
+ for (b = branch_table[lu]; b; b = b->table_next_branch)
+ write_branch_report(rpt, b);
+ }
+
+ if (first_tag) {
+ struct tag *tg;
+ fputc('\n', rpt);
+ fputs("Annotated Tags\n", rpt);
+ fputs("--------------\n", rpt);
+ for (tg = first_tag; tg; tg = tg->next_tag) {
+ fputs(oid_to_hex(&tg->oid), rpt);
+ fputc(' ', rpt);
+ fputs(tg->name, rpt);
+ fputc('\n', rpt);
+ }
+ }
+
+ fputc('\n', rpt);
+ fputs("Marks\n", rpt);
+ fputs("-----\n", rpt);
+ if (export_marks_file)
+ fprintf(rpt, " exported to %s\n", export_marks_file);
+ else
+ for_each_mark(marks, 0, dump_marks_fn, rpt);
+
+ fputc('\n', rpt);
+ fputs("-------------------\n", rpt);
+ fputs("END OF CRASH REPORT\n", rpt);
+ fclose(rpt);
+ free(loc);
+}
+
+static void end_packfile(void);
+static void unkeep_all_packs(void);
+static void dump_marks(void);
+
+static NORETURN void die_nicely(const char *err, va_list params)
+{
+ static int zombie;
+ char message[2 * PATH_MAX];
+
+ vsnprintf(message, sizeof(message), err, params);
+ fputs("fatal: ", stderr);
+ fputs(message, stderr);
+ fputc('\n', stderr);
+
+ if (!zombie) {
+ zombie = 1;
+ write_crash_report(message);
+ end_packfile();
+ unkeep_all_packs();
+ dump_marks();
+ }
+ exit(128);
+}
+
+#ifndef SIGUSR1 /* Windows, for example */
+
+static void set_checkpoint_signal(void)
+{
+}
+
+#else
+
+static void checkpoint_signal(int signo)
+{
+ checkpoint_requested = 1;
+}
+
+static void set_checkpoint_signal(void)
+{
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = checkpoint_signal;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART;
+ sigaction(SIGUSR1, &sa, NULL);
+}
+
+#endif
+
+static void alloc_objects(unsigned int cnt)
+{
+ struct object_entry_pool *b;
+
+ b = xmalloc(sizeof(struct object_entry_pool)
+ + cnt * sizeof(struct object_entry));
+ b->next_pool = blocks;
+ b->next_free = b->entries;
+ b->end = b->entries + cnt;
+ blocks = b;
+ alloc_count += cnt;
+}
+
+static struct object_entry *new_object(struct object_id *oid)
+{
+ struct object_entry *e;
+
+ if (blocks->next_free == blocks->end)
+ alloc_objects(object_entry_alloc);
+
+ e = blocks->next_free++;
+ oidcpy(&e->idx.oid, oid);
+ return e;
+}
+
+static struct object_entry *find_object(struct object_id *oid)
+{
+ return hashmap_get_entry_from_hash(&object_table, oidhash(oid), oid,
+ struct object_entry, ent);
+}
+
+static struct object_entry *insert_object(struct object_id *oid)
+{
+ struct object_entry *e;
+ unsigned int hash = oidhash(oid);
+
+ e = hashmap_get_entry_from_hash(&object_table, hash, oid,
+ struct object_entry, ent);
+ if (!e) {
+ e = new_object(oid);
+ e->idx.offset = 0;
+ hashmap_entry_init(&e->ent, hash);
+ hashmap_add(&object_table, &e->ent);
+ }
+
+ return e;
+}
+
+static void invalidate_pack_id(unsigned int id)
+{
+ unsigned long lu;
+ struct tag *t;
+ struct hashmap_iter iter;
+ struct object_entry *e;
+
+ hashmap_for_each_entry(&object_table, &iter, e, ent) {
+ if (e->pack_id == id)
+ e->pack_id = MAX_PACK_ID;
+ }
+
+ for (lu = 0; lu < branch_table_sz; lu++) {
+ struct branch *b;
+
+ for (b = branch_table[lu]; b; b = b->table_next_branch)
+ if (b->pack_id == id)
+ b->pack_id = MAX_PACK_ID;
+ }
+
+ for (t = first_tag; t; t = t->next_tag)
+ if (t->pack_id == id)
+ t->pack_id = MAX_PACK_ID;
+}
+
+static unsigned int hc_str(const char *s, size_t len)
+{
+ unsigned int r = 0;
+ while (len-- > 0)
+ r = r * 31 + *s++;
+ return r;
+}
+
+static void insert_mark(struct mark_set *s, uintmax_t idnum, struct object_entry *oe)
+{
+ while ((idnum >> s->shift) >= 1024) {
+ s = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
+ s->shift = marks->shift + 10;
+ s->data.sets[0] = marks;
+ marks = s;
+ }
+ while (s->shift) {
+ uintmax_t i = idnum >> s->shift;
+ idnum -= i << s->shift;
+ if (!s->data.sets[i]) {
+ s->data.sets[i] = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
+ s->data.sets[i]->shift = s->shift - 10;
+ }
+ s = s->data.sets[i];
+ }
+ if (!s->data.marked[idnum])
+ marks_set_count++;
+ s->data.marked[idnum] = oe;
+}
+
+static void *find_mark(struct mark_set *s, uintmax_t idnum)
+{
+ uintmax_t orig_idnum = idnum;
+ struct object_entry *oe = NULL;
+ if ((idnum >> s->shift) < 1024) {
+ while (s && s->shift) {
+ uintmax_t i = idnum >> s->shift;
+ idnum -= i << s->shift;
+ s = s->data.sets[i];
+ }
+ if (s)
+ oe = s->data.marked[idnum];
+ }
+ if (!oe)
+ die("mark :%" PRIuMAX " not declared", orig_idnum);
+ return oe;
+}
+
+static struct atom_str *to_atom(const char *s, unsigned short len)
+{
+ unsigned int hc = hc_str(s, len) % atom_table_sz;
+ struct atom_str *c;
+
+ for (c = atom_table[hc]; c; c = c->next_atom)
+ if (c->str_len == len && !strncmp(s, c->str_dat, len))
+ return c;
+
+ c = mem_pool_alloc(&fi_mem_pool, sizeof(struct atom_str) + len + 1);
+ c->str_len = len;
+ memcpy(c->str_dat, s, len);
+ c->str_dat[len] = 0;
+ c->next_atom = atom_table[hc];
+ atom_table[hc] = c;
+ atom_cnt++;
+ return c;
+}
+
+static struct branch *lookup_branch(const char *name)
+{
+ unsigned int hc = hc_str(name, strlen(name)) % branch_table_sz;
+ struct branch *b;
+
+ for (b = branch_table[hc]; b; b = b->table_next_branch)
+ if (!strcmp(name, b->name))
+ return b;
+ return NULL;
+}
+
+static struct branch *new_branch(const char *name)
+{
+ unsigned int hc = hc_str(name, strlen(name)) % branch_table_sz;
+ struct branch *b = lookup_branch(name);
+
+ if (b)
+ die("Invalid attempt to create duplicate branch: %s", name);
+ if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
+ die("Branch name doesn't conform to GIT standards: %s", name);
+
+ b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
+ b->name = mem_pool_strdup(&fi_mem_pool, name);
+ b->table_next_branch = branch_table[hc];
+ b->branch_tree.versions[0].mode = S_IFDIR;
+ b->branch_tree.versions[1].mode = S_IFDIR;
+ b->num_notes = 0;
+ b->active = 0;
+ b->pack_id = MAX_PACK_ID;
+ branch_table[hc] = b;
+ branch_count++;
+ return b;
+}
+
+static unsigned int hc_entries(unsigned int cnt)
+{
+ cnt = cnt & 7 ? (cnt / 8) + 1 : cnt / 8;
+ return cnt < avail_tree_table_sz ? cnt : avail_tree_table_sz - 1;
+}
+
+static struct tree_content *new_tree_content(unsigned int cnt)
+{
+ struct avail_tree_content *f, *l = NULL;
+ struct tree_content *t;
+ unsigned int hc = hc_entries(cnt);
+
+ for (f = avail_tree_table[hc]; f; l = f, f = f->next_avail)
+ if (f->entry_capacity >= cnt)
+ break;
+
+ if (f) {
+ if (l)
+ l->next_avail = f->next_avail;
+ else
+ avail_tree_table[hc] = f->next_avail;
+ } else {
+ cnt = cnt & 7 ? ((cnt / 8) + 1) * 8 : cnt;
+ f = mem_pool_alloc(&fi_mem_pool, sizeof(*t) + sizeof(t->entries[0]) * cnt);
+ f->entry_capacity = cnt;
+ }
+
+ t = (struct tree_content*)f;
+ t->entry_count = 0;
+ t->delta_depth = 0;
+ return t;
+}
+
+static void release_tree_entry(struct tree_entry *e);
+static void release_tree_content(struct tree_content *t)
+{
+ struct avail_tree_content *f = (struct avail_tree_content*)t;
+ unsigned int hc = hc_entries(f->entry_capacity);
+ f->next_avail = avail_tree_table[hc];
+ avail_tree_table[hc] = f;
+}
+
+static void release_tree_content_recursive(struct tree_content *t)
+{
+ unsigned int i;
+ for (i = 0; i < t->entry_count; i++)
+ release_tree_entry(t->entries[i]);
+ release_tree_content(t);
+}
+
+static struct tree_content *grow_tree_content(
+ struct tree_content *t,
+ int amt)
+{
+ struct tree_content *r = new_tree_content(t->entry_count + amt);
+ r->entry_count = t->entry_count;
+ r->delta_depth = t->delta_depth;
+ COPY_ARRAY(r->entries, t->entries, t->entry_count);
+ release_tree_content(t);
+ return r;
+}
+
+static struct tree_entry *new_tree_entry(void)
+{
+ struct tree_entry *e;
+
+ if (!avail_tree_entry) {
+ unsigned int n = tree_entry_alloc;
+ tree_entry_allocd += n * sizeof(struct tree_entry);
+ ALLOC_ARRAY(e, n);
+ avail_tree_entry = e;
+ while (n-- > 1) {
+ *((void**)e) = e + 1;
+ e++;
+ }
+ *((void**)e) = NULL;
+ }
+
+ e = avail_tree_entry;
+ avail_tree_entry = *((void**)e);
+ return e;
+}
+
+static void release_tree_entry(struct tree_entry *e)
+{
+ if (e->tree)
+ release_tree_content_recursive(e->tree);
+ *((void**)e) = avail_tree_entry;
+ avail_tree_entry = e;
+}
+
+static struct tree_content *dup_tree_content(struct tree_content *s)
+{
+ struct tree_content *d;
+ struct tree_entry *a, *b;
+ unsigned int i;
+
+ if (!s)
+ return NULL;
+ d = new_tree_content(s->entry_count);
+ for (i = 0; i < s->entry_count; i++) {
+ a = s->entries[i];
+ b = new_tree_entry();
+ memcpy(b, a, sizeof(*a));
+ if (a->tree && is_null_oid(&b->versions[1].oid))
+ b->tree = dup_tree_content(a->tree);
+ else
+ b->tree = NULL;
+ d->entries[i] = b;
+ }
+ d->entry_count = s->entry_count;
+ d->delta_depth = s->delta_depth;
+
+ return d;
+}
+
+static void start_packfile(void)
+{
+ struct strbuf tmp_file = STRBUF_INIT;
+ struct packed_git *p;
+ struct pack_header hdr;
+ int pack_fd;
+
+ pack_fd = odb_mkstemp(&tmp_file, "pack/tmp_pack_XXXXXX");
+ FLEX_ALLOC_STR(p, pack_name, tmp_file.buf);
+ strbuf_release(&tmp_file);
+
+ p->pack_fd = pack_fd;
+ p->do_not_close = 1;
+ pack_file = hashfd(pack_fd, p->pack_name);
+
+ hdr.hdr_signature = htonl(PACK_SIGNATURE);
+ hdr.hdr_version = htonl(2);
+ hdr.hdr_entries = 0;
+ hashwrite(pack_file, &hdr, sizeof(hdr));
+
+ pack_data = p;
+ pack_size = sizeof(hdr);
+ object_count = 0;
+
+ REALLOC_ARRAY(all_packs, pack_id + 1);
+ all_packs[pack_id] = p;
+}
+
+static const char *create_index(void)
+{
+ const char *tmpfile;
+ struct pack_idx_entry **idx, **c, **last;
+ struct object_entry *e;
+ struct object_entry_pool *o;
+
+ /* Build the table of object IDs. */
+ ALLOC_ARRAY(idx, object_count);
+ c = idx;
+ for (o = blocks; o; o = o->next_pool)
+ for (e = o->next_free; e-- != o->entries;)
+ if (pack_id == e->pack_id)
+ *c++ = &e->idx;
+ last = idx + object_count;
+ if (c != last)
+ die("internal consistency error creating the index");
+
+ tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
+ pack_data->hash);
+ free(idx);
+ return tmpfile;
+}
+
+static char *keep_pack(const char *curr_index_name)
+{
+ static const char *keep_msg = "fast-import";
+ struct strbuf name = STRBUF_INIT;
+ int keep_fd;
+
+ odb_pack_name(&name, pack_data->hash, "keep");
+ keep_fd = odb_pack_keep(name.buf);
+ if (keep_fd < 0)
+ die_errno("cannot create keep file");
+ write_or_die(keep_fd, keep_msg, strlen(keep_msg));
+ if (close(keep_fd))
+ die_errno("failed to write keep file");
+
+ odb_pack_name(&name, pack_data->hash, "pack");
+ if (finalize_object_file(pack_data->pack_name, name.buf))
+ die("cannot store pack file");
+
+ odb_pack_name(&name, pack_data->hash, "idx");
+ if (finalize_object_file(curr_index_name, name.buf))
+ die("cannot store index file");
+ free((void *)curr_index_name);
+ return strbuf_detach(&name, NULL);
+}
+
+static void unkeep_all_packs(void)
+{
+ struct strbuf name = STRBUF_INIT;
+ int k;
+
+ for (k = 0; k < pack_id; k++) {
+ struct packed_git *p = all_packs[k];
+ odb_pack_name(&name, p->hash, "keep");
+ unlink_or_warn(name.buf);
+ }
+ strbuf_release(&name);
+}
+
+static int loosen_small_pack(const struct packed_git *p)
+{
+ struct child_process unpack = CHILD_PROCESS_INIT;
+
+ if (lseek(p->pack_fd, 0, SEEK_SET) < 0)
+ die_errno("Failed seeking to start of '%s'", p->pack_name);
+
+ unpack.in = p->pack_fd;
+ unpack.git_cmd = 1;
+ unpack.stdout_to_stderr = 1;
+ strvec_push(&unpack.args, "unpack-objects");
+ if (!show_stats)
+ strvec_push(&unpack.args, "-q");
+
+ return run_command(&unpack);
+}
+
+static void end_packfile(void)
+{
+ static int running;
+
+ if (running || !pack_data)
+ return;
+
+ running = 1;
+ clear_delta_base_cache();
+ if (object_count) {
+ struct packed_git *new_p;
+ struct object_id cur_pack_oid;
+ char *idx_name;
+ int i;
+ struct branch *b;
+ struct tag *t;
+
+ close_pack_windows(pack_data);
+ finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
+ fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
+ pack_data->pack_name, object_count,
+ cur_pack_oid.hash, pack_size);
+
+ if (object_count <= unpack_limit) {
+ if (!loosen_small_pack(pack_data)) {
+ invalidate_pack_id(pack_id);
+ goto discard_pack;
+ }
+ }
+
+ close(pack_data->pack_fd);
+ idx_name = keep_pack(create_index());
+
+ /* Register the packfile with core git's machinery. */
+ new_p = add_packed_git(idx_name, strlen(idx_name), 1);
+ if (!new_p)
+ die("core git rejected index %s", idx_name);
+ all_packs[pack_id] = new_p;
+ install_packed_git(the_repository, new_p);
+ free(idx_name);
+
+ /* Print the boundary */
+ if (pack_edges) {
+ fprintf(pack_edges, "%s:", new_p->pack_name);
+ for (i = 0; i < branch_table_sz; i++) {
+ for (b = branch_table[i]; b; b = b->table_next_branch) {
+ if (b->pack_id == pack_id)
+ fprintf(pack_edges, " %s",
+ oid_to_hex(&b->oid));
+ }
+ }
+ for (t = first_tag; t; t = t->next_tag) {
+ if (t->pack_id == pack_id)
+ fprintf(pack_edges, " %s",
+ oid_to_hex(&t->oid));
+ }
+ fputc('\n', pack_edges);
+ fflush(pack_edges);
+ }
+
+ pack_id++;
+ }
+ else {
+discard_pack:
+ close(pack_data->pack_fd);
+ unlink_or_warn(pack_data->pack_name);
+ }
+ FREE_AND_NULL(pack_data);
+ running = 0;
+
+ /* We can't carry a delta across packfiles. */
+ strbuf_release(&last_blob.data);
+ last_blob.offset = 0;
+ last_blob.depth = 0;
+}
+
+static void cycle_packfile(void)
+{
+ end_packfile();
+ start_packfile();
+}
+
+static int store_object(
+ enum object_type type,
+ struct strbuf *dat,
+ struct last_object *last,
+ struct object_id *oidout,
+ uintmax_t mark)
+{
+ void *out, *delta;
+ struct object_entry *e;
+ unsigned char hdr[96];
+ struct object_id oid;
+ unsigned long hdrlen, deltalen;
+ git_hash_ctx c;
+ git_zstream s;
+
+ hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
+ type_name(type), (unsigned long)dat->len) + 1;
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, dat->buf, dat->len);
+ the_hash_algo->final_fn(oid.hash, &c);
+ if (oidout)
+ oidcpy(oidout, &oid);
+
+ e = insert_object(&oid);
+ if (mark)
+ insert_mark(marks, mark, e);
+ if (e->idx.offset) {
+ duplicate_count_by_type[type]++;
+ return 1;
+ } else if (find_sha1_pack(oid.hash,
+ get_all_packs(the_repository))) {
+ e->type = type;
+ e->pack_id = MAX_PACK_ID;
+ e->idx.offset = 1; /* just not zero! */
+ duplicate_count_by_type[type]++;
+ return 1;
+ }
+
+ if (last && last->data.len && last->data.buf && last->depth < max_depth
+ && dat->len > the_hash_algo->rawsz) {
+
+ delta_count_attempts_by_type[type]++;
+ delta = diff_delta(last->data.buf, last->data.len,
+ dat->buf, dat->len,
+ &deltalen, dat->len - the_hash_algo->rawsz);
+ } else
+ delta = NULL;
+
+ git_deflate_init(&s, pack_compression_level);
+ if (delta) {
+ s.next_in = delta;
+ s.avail_in = deltalen;
+ } else {
+ s.next_in = (void *)dat->buf;
+ s.avail_in = dat->len;
+ }
+ s.avail_out = git_deflate_bound(&s, s.avail_in);
+ s.next_out = out = xmalloc(s.avail_out);
+ while (git_deflate(&s, Z_FINISH) == Z_OK)
+ ; /* nothing */
+ git_deflate_end(&s);
+
+ /* Determine if we should auto-checkpoint. */
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + s.total_out) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + s.total_out) < pack_size) {
+
+ /* This new object needs to *not* have the current pack_id. */
+ e->pack_id = pack_id + 1;
+ cycle_packfile();
+
+ /* We cannot carry a delta into the new pack. */
+ if (delta) {
+ FREE_AND_NULL(delta);
+
+ git_deflate_init(&s, pack_compression_level);
+ s.next_in = (void *)dat->buf;
+ s.avail_in = dat->len;
+ s.avail_out = git_deflate_bound(&s, s.avail_in);
+ s.next_out = out = xrealloc(out, s.avail_out);
+ while (git_deflate(&s, Z_FINISH) == Z_OK)
+ ; /* nothing */
+ git_deflate_end(&s);
+ }
+ }
+
+ e->type = type;
+ e->pack_id = pack_id;
+ e->idx.offset = pack_size;
+ object_count++;
+ object_count_by_type[type]++;
+
+ crc32_begin(pack_file);
+
+ if (delta) {
+ off_t ofs = e->idx.offset - last->offset;
+ unsigned pos = sizeof(hdr) - 1;
+
+ delta_count_by_type[type]++;
+ e->depth = last->depth + 1;
+
+ hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
+ OBJ_OFS_DELTA, deltalen);
+ hashwrite(pack_file, hdr, hdrlen);
+ pack_size += hdrlen;
+
+ hdr[pos] = ofs & 127;
+ while (ofs >>= 7)
+ hdr[--pos] = 128 | (--ofs & 127);
+ hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
+ pack_size += sizeof(hdr) - pos;
+ } else {
+ e->depth = 0;
+ hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
+ type, dat->len);
+ hashwrite(pack_file, hdr, hdrlen);
+ pack_size += hdrlen;
+ }
+
+ hashwrite(pack_file, out, s.total_out);
+ pack_size += s.total_out;
+
+ e->idx.crc32 = crc32_end(pack_file);
+
+ free(out);
+ free(delta);
+ if (last) {
+ if (last->no_swap) {
+ last->data = *dat;
+ } else {
+ strbuf_swap(&last->data, dat);
+ }
+ last->offset = e->idx.offset;
+ last->depth = e->depth;
+ }
+ return 0;
+}
+
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
+{
+ if (hashfile_truncate(pack_file, checkpoint))
+ die_errno("cannot truncate pack to skip duplicate");
+ pack_size = checkpoint->offset;
+}
+
+static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
+{
+ size_t in_sz = 64 * 1024, out_sz = 64 * 1024;
+ unsigned char *in_buf = xmalloc(in_sz);
+ unsigned char *out_buf = xmalloc(out_sz);
+ struct object_entry *e;
+ struct object_id oid;
+ unsigned long hdrlen;
+ off_t offset;
+ git_hash_ctx c;
+ git_zstream s;
+ struct hashfile_checkpoint checkpoint;
+ int status = Z_OK;
+
+ /* Determine if we should auto-checkpoint. */
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + len) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + len) < pack_size)
+ cycle_packfile();
+
+ hashfile_checkpoint(pack_file, &checkpoint);
+ offset = checkpoint.offset;
+
+ hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
+
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, out_buf, hdrlen);
+
+ crc32_begin(pack_file);
+
+ git_deflate_init(&s, pack_compression_level);
+
+ hdrlen = encode_in_pack_object_header(out_buf, out_sz, OBJ_BLOB, len);
+
+ s.next_out = out_buf + hdrlen;
+ s.avail_out = out_sz - hdrlen;
+
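+	/*
+	 * Stream the blob through zlib in 64k chunks: refill the input
+	 * buffer from stdin, feed it to both the object hash and the
+	 * deflater, and flush each full output buffer into the packfile
+	 * so the whole blob never needs to be held in memory.
+	 */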
+ while (status != Z_STREAM_END) {
+ if (0 < len && !s.avail_in) {
+ size_t cnt = in_sz < len ? in_sz : (size_t)len;
+ size_t n = fread(in_buf, 1, cnt, stdin);
+ if (!n && feof(stdin))
+ die("EOF in data (%" PRIuMAX " bytes remaining)", len);
+
+ the_hash_algo->update_fn(&c, in_buf, n);
+ s.next_in = in_buf;
+ s.avail_in = n;
+ len -= n;
+ }
+
+ status = git_deflate(&s, len ? 0 : Z_FINISH);
+
+ if (!s.avail_out || status == Z_STREAM_END) {
+ size_t n = s.next_out - out_buf;
+ hashwrite(pack_file, out_buf, n);
+ pack_size += n;
+ s.next_out = out_buf;
+ s.avail_out = out_sz;
+ }
+
+ switch (status) {
+ case Z_OK:
+ case Z_BUF_ERROR:
+ case Z_STREAM_END:
+ continue;
+ default:
+ die("unexpected deflate failure: %d", status);
+ }
+ }
+ git_deflate_end(&s);
+ the_hash_algo->final_fn(oid.hash, &c);
+
+ if (oidout)
+ oidcpy(oidout, &oid);
+
+ e = insert_object(&oid);
+
+ if (mark)
+ insert_mark(marks, mark, e);
+
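+	/*
+	 * If the blob turns out to be a duplicate, either of an object
+	 * written earlier by this process or of one in an existing pack,
+	 * drop the bytes just written by truncating the pack back to the
+	 * checkpoint taken above.
+	 */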
+ if (e->idx.offset) {
+ duplicate_count_by_type[OBJ_BLOB]++;
+ truncate_pack(&checkpoint);
+
+ } else if (find_sha1_pack(oid.hash,
+ get_all_packs(the_repository))) {
+ e->type = OBJ_BLOB;
+ e->pack_id = MAX_PACK_ID;
+ e->idx.offset = 1; /* just not zero! */
+ duplicate_count_by_type[OBJ_BLOB]++;
+ truncate_pack(&checkpoint);
+
+ } else {
+ e->depth = 0;
+ e->type = OBJ_BLOB;
+ e->pack_id = pack_id;
+ e->idx.offset = offset;
+ e->idx.crc32 = crc32_end(pack_file);
+ object_count++;
+ object_count_by_type[OBJ_BLOB]++;
+ }
+
+ free(in_buf);
+ free(out_buf);
+}
+
+/* All calls must be guarded by find_object() or find_mark() to
+ * ensure the 'struct object_entry' passed was written by this
+ * process instance. We unpack the entry by the offset, avoiding
+ * the need for the corresponding .idx file. This unpacking rule
+ * works because we only use OBJ_OFS_DELTA within the packfiles
+ * created by fast-import.
+ *
+ * oe must not be NULL. Such an oe usually comes from giving
+ * an unknown SHA-1 to find_object() or an undefined mark to
+ * find_mark(). Callers must test for this condition and use
+ * the standard read_object_file() when it happens.
+ *
+ * oe->pack_id must not be MAX_PACK_ID. Such an oe is usually from
+ * find_mark(), where the mark was reloaded from an existing marks
+ * file and is referencing an object that this fast-import process
+ * instance did not write out to a packfile. Callers must test for
+ * this condition and use read_object_file() instead.
+ */
+static void *gfi_unpack_entry(
+ struct object_entry *oe,
+ unsigned long *sizep)
+{
+ enum object_type type;
+ struct packed_git *p = all_packs[oe->pack_id];
+ if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
+ /* The object is stored in the packfile we are writing to
+ * and we have modified it since the last time we scanned
+ * back to read a previously written object. If an old
+ * window covered [p->pack_size, p->pack_size + rawsz) its
+ * data is stale and is not valid. Closing all windows
+ * and updating the packfile length ensures we can read
+ * the newly written data.
+ */
+ close_pack_windows(p);
+ hashflush(pack_file);
+
+		/* We have to offer an additional rawsz bytes at the end
+		 * of the packfile as the core unpacker code assumes the
+ * footer is present at the file end and must promise
+ * at least rawsz bytes within any window it maps. But
+ * we don't actually create the footer here.
+ */
+ p->pack_size = pack_size + the_hash_algo->rawsz;
+ }
+ return unpack_entry(the_repository, p, oe->idx.offset, &type, sizep);
+}
+
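+/*
+ * Parse an octal mode ("100644", "40000", ...) terminated by a single
+ * space; returns a pointer just past the space, or NULL if a non-octal
+ * digit is seen.
+ */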
+static const char *get_mode(const char *str, uint16_t *modep)
+{
+ unsigned char c;
+ uint16_t mode = 0;
+
+ while ((c = *str++) != ' ') {
+ if (c < '0' || c > '7')
+ return NULL;
+ mode = (mode << 3) + (c - '0');
+ }
+ *modep = mode;
+ return str;
+}
+
+static void load_tree(struct tree_entry *root)
+{
+ struct object_id *oid = &root->versions[1].oid;
+ struct object_entry *myoe;
+ struct tree_content *t;
+ unsigned long size;
+ char *buf;
+ const char *c;
+
+ root->tree = t = new_tree_content(8);
+ if (is_null_oid(oid))
+ return;
+
+ myoe = find_object(oid);
+ if (myoe && myoe->pack_id != MAX_PACK_ID) {
+ if (myoe->type != OBJ_TREE)
+ die("Not a tree: %s", oid_to_hex(oid));
+ t->delta_depth = myoe->depth;
+ buf = gfi_unpack_entry(myoe, &size);
+ if (!buf)
+ die("Can't load tree %s", oid_to_hex(oid));
+ } else {
+ enum object_type type;
+ buf = read_object_file(oid, &type, &size);
+ if (!buf || type != OBJ_TREE)
+ die("Can't load tree %s", oid_to_hex(oid));
+ }
+
+ c = buf;
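+	/* Each entry is "<octal mode> SP <name> NUL" followed by the raw hash. */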
+ while (c != (buf + size)) {
+ struct tree_entry *e = new_tree_entry();
+
+ if (t->entry_count == t->entry_capacity)
+ root->tree = t = grow_tree_content(t, t->entry_count);
+ t->entries[t->entry_count++] = e;
+
+ e->tree = NULL;
+ c = get_mode(c, &e->versions[1].mode);
+ if (!c)
+ die("Corrupt mode in %s", oid_to_hex(oid));
+ e->versions[0].mode = e->versions[1].mode;
+ e->name = to_atom(c, strlen(c));
+ c += e->name->str_len + 1;
+ hashcpy(e->versions[0].oid.hash, (unsigned char *)c);
+ hashcpy(e->versions[1].oid.hash, (unsigned char *)c);
+ c += the_hash_algo->rawsz;
+ }
+ free(buf);
+}
+
+static int tecmp0(const void *_a, const void *_b)
+{
+ struct tree_entry *a = *((struct tree_entry**)_a);
+ struct tree_entry *b = *((struct tree_entry**)_b);
+ return base_name_compare(
+ a->name->str_dat, a->name->str_len, a->versions[0].mode,
+ b->name->str_dat, b->name->str_len, b->versions[0].mode);
+}
+
+static int tecmp1(const void *_a, const void *_b)
+{
+ struct tree_entry *a = *((struct tree_entry**)_a);
+ struct tree_entry *b = *((struct tree_entry**)_b);
+ return base_name_compare(
+ a->name->str_dat, a->name->str_len, a->versions[1].mode,
+ b->name->str_dat, b->name->str_len, b->versions[1].mode);
+}
+
+static void mktree(struct tree_content *t, int v, struct strbuf *b)
+{
+ size_t maxlen = 0;
+ unsigned int i;
+
+ if (!v)
+ QSORT(t->entries, t->entry_count, tecmp0);
+ else
+ QSORT(t->entries, t->entry_count, tecmp1);
+
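+	/* maxlen is only a preallocation hint; strbuf grows as needed. */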
+ for (i = 0; i < t->entry_count; i++) {
+ if (t->entries[i]->versions[v].mode)
+ maxlen += t->entries[i]->name->str_len + 34;
+ }
+
+ strbuf_reset(b);
+ strbuf_grow(b, maxlen);
+ for (i = 0; i < t->entry_count; i++) {
+ struct tree_entry *e = t->entries[i];
+ if (!e->versions[v].mode)
+ continue;
+ strbuf_addf(b, "%o %s%c",
+ (unsigned int)(e->versions[v].mode & ~NO_DELTA),
+ e->name->str_dat, '\0');
+ strbuf_add(b, e->versions[v].oid.hash, the_hash_algo->rawsz);
+ }
+}
+
+static void store_tree(struct tree_entry *root)
+{
+ struct tree_content *t;
+ unsigned int i, j, del;
+ struct last_object lo = { STRBUF_INIT, 0, 0, /* no_swap */ 1 };
+ struct object_entry *le = NULL;
+
+ if (!is_null_oid(&root->versions[1].oid))
+ return;
+
+ if (!root->tree)
+ load_tree(root);
+ t = root->tree;
+
+ for (i = 0; i < t->entry_count; i++) {
+ if (t->entries[i]->tree)
+ store_tree(t->entries[i]);
+ }
+
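+	/*
+	 * If the previous version of this tree was written into the pack
+	 * we are currently building, offer its encoding to store_object()
+	 * as a delta base.
+	 */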
+ if (!(root->versions[0].mode & NO_DELTA))
+ le = find_object(&root->versions[0].oid);
+ if (S_ISDIR(root->versions[0].mode) && le && le->pack_id == pack_id) {
+ mktree(t, 0, &old_tree);
+ lo.data = old_tree;
+ lo.offset = le->idx.offset;
+ lo.depth = t->delta_depth;
+ }
+
+ mktree(t, 1, &new_tree);
+ store_object(OBJ_TREE, &new_tree, &lo, &root->versions[1].oid, 0);
+
+ t->delta_depth = lo.depth;
+ for (i = 0, j = 0, del = 0; i < t->entry_count; i++) {
+ struct tree_entry *e = t->entries[i];
+ if (e->versions[1].mode) {
+ e->versions[0].mode = e->versions[1].mode;
+ oidcpy(&e->versions[0].oid, &e->versions[1].oid);
+ t->entries[j++] = e;
+ } else {
+ release_tree_entry(e);
+ del++;
+ }
+ }
+ t->entry_count -= del;
+}
+
+static void tree_content_replace(
+ struct tree_entry *root,
+ const struct object_id *oid,
+ const uint16_t mode,
+ struct tree_content *newtree)
+{
+ if (!S_ISDIR(mode))
+ die("Root cannot be a non-directory");
+ oidclr(&root->versions[0].oid);
+ oidcpy(&root->versions[1].oid, oid);
+ if (root->tree)
+ release_tree_content_recursive(root->tree);
+ root->tree = newtree;
+}
+
+static int tree_content_set(
+ struct tree_entry *root,
+ const char *p,
+ const struct object_id *oid,
+ const uint16_t mode,
+ struct tree_content *subtree)
+{
+ struct tree_content *t;
+ const char *slash1;
+ unsigned int i, n;
+ struct tree_entry *e;
+
+ slash1 = strchrnul(p, '/');
+ n = slash1 - p;
+ if (!n)
+ die("Empty path component found in input");
+ if (!*slash1 && !S_ISDIR(mode) && subtree)
+ die("Non-directories cannot have subtrees");
+
+ if (!root->tree)
+ load_tree(root);
+ t = root->tree;
+ for (i = 0; i < t->entry_count; i++) {
+ e = t->entries[i];
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
+ if (!*slash1) {
+ if (!S_ISDIR(mode)
+ && e->versions[1].mode == mode
+ && oideq(&e->versions[1].oid, oid))
+ return 0;
+ e->versions[1].mode = mode;
+ oidcpy(&e->versions[1].oid, oid);
+ if (e->tree)
+ release_tree_content_recursive(e->tree);
+ e->tree = subtree;
+
+ /*
+ * We need to leave e->versions[0].sha1 alone
+ * to avoid modifying the preimage tree used
+ * when writing out the parent directory.
+ * But after replacing the subdir with a
+ * completely different one, it's not a good
+ * delta base any more, and besides, we've
+ * thrown away the tree entries needed to
+ * make a delta against it.
+ *
+ * So let's just explicitly disable deltas
+ * for the subtree.
+ */
+ if (S_ISDIR(e->versions[0].mode))
+ e->versions[0].mode |= NO_DELTA;
+
+ oidclr(&root->versions[1].oid);
+ return 1;
+ }
+ if (!S_ISDIR(e->versions[1].mode)) {
+ e->tree = new_tree_content(8);
+ e->versions[1].mode = S_IFDIR;
+ }
+ if (!e->tree)
+ load_tree(e);
+ if (tree_content_set(e, slash1 + 1, oid, mode, subtree)) {
+ oidclr(&root->versions[1].oid);
+ return 1;
+ }
+ return 0;
+ }
+ }
+
+ if (t->entry_count == t->entry_capacity)
+ root->tree = t = grow_tree_content(t, t->entry_count);
+ e = new_tree_entry();
+ e->name = to_atom(p, n);
+ e->versions[0].mode = 0;
+ oidclr(&e->versions[0].oid);
+ t->entries[t->entry_count++] = e;
+ if (*slash1) {
+ e->tree = new_tree_content(8);
+ e->versions[1].mode = S_IFDIR;
+ tree_content_set(e, slash1 + 1, oid, mode, subtree);
+ } else {
+ e->tree = subtree;
+ e->versions[1].mode = mode;
+ oidcpy(&e->versions[1].oid, oid);
+ }
+ oidclr(&root->versions[1].oid);
+ return 1;
+}
+
+static int tree_content_remove(
+ struct tree_entry *root,
+ const char *p,
+ struct tree_entry *backup_leaf,
+ int allow_root)
+{
+ struct tree_content *t;
+ const char *slash1;
+ unsigned int i, n;
+ struct tree_entry *e;
+
+ slash1 = strchrnul(p, '/');
+ n = slash1 - p;
+
+ if (!root->tree)
+ load_tree(root);
+
+ if (!*p && allow_root) {
+ e = root;
+ goto del_entry;
+ }
+
+ t = root->tree;
+ for (i = 0; i < t->entry_count; i++) {
+ e = t->entries[i];
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
+ if (*slash1 && !S_ISDIR(e->versions[1].mode))
+ /*
+ * If p names a file in some subdirectory, and a
+ * file or symlink matching the name of the
+ * parent directory of p exists, then p cannot
+ * exist and need not be deleted.
+ */
+ return 1;
+ if (!*slash1 || !S_ISDIR(e->versions[1].mode))
+ goto del_entry;
+ if (!e->tree)
+ load_tree(e);
+ if (tree_content_remove(e, slash1 + 1, backup_leaf, 0)) {
+ for (n = 0; n < e->tree->entry_count; n++) {
+ if (e->tree->entries[n]->versions[1].mode) {
+ oidclr(&root->versions[1].oid);
+ return 1;
+ }
+ }
+ backup_leaf = NULL;
+ goto del_entry;
+ }
+ return 0;
+ }
+ }
+ return 0;
+
+del_entry:
+ if (backup_leaf)
+ memcpy(backup_leaf, e, sizeof(*backup_leaf));
+ else if (e->tree)
+ release_tree_content_recursive(e->tree);
+ e->tree = NULL;
+ e->versions[1].mode = 0;
+ oidclr(&e->versions[1].oid);
+ oidclr(&root->versions[1].oid);
+ return 1;
+}
+
+static int tree_content_get(
+ struct tree_entry *root,
+ const char *p,
+ struct tree_entry *leaf,
+ int allow_root)
+{
+ struct tree_content *t;
+ const char *slash1;
+ unsigned int i, n;
+ struct tree_entry *e;
+
+ slash1 = strchrnul(p, '/');
+ n = slash1 - p;
+ if (!n && !allow_root)
+ die("Empty path component found in input");
+
+ if (!root->tree)
+ load_tree(root);
+
+ if (!n) {
+ e = root;
+ goto found_entry;
+ }
+
+ t = root->tree;
+ for (i = 0; i < t->entry_count; i++) {
+ e = t->entries[i];
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
+ if (!*slash1)
+ goto found_entry;
+ if (!S_ISDIR(e->versions[1].mode))
+ return 0;
+ if (!e->tree)
+ load_tree(e);
+ return tree_content_get(e, slash1 + 1, leaf, 0);
+ }
+ }
+ return 0;
+
+found_entry:
+ memcpy(leaf, e, sizeof(*leaf));
+ if (e->tree && is_null_oid(&e->versions[1].oid))
+ leaf->tree = dup_tree_content(e->tree);
+ else
+ leaf->tree = NULL;
+ return 1;
+}
+
+static int update_branch(struct branch *b)
+{
+ static const char *msg = "fast-import";
+ struct ref_transaction *transaction;
+ struct object_id old_oid;
+ struct strbuf err = STRBUF_INIT;
+
+ if (is_null_oid(&b->oid)) {
+ if (b->delete)
+ delete_ref(NULL, b->name, NULL, 0);
+ return 0;
+ }
+ if (read_ref(b->name, &old_oid))
+ oidclr(&old_oid);
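+	/*
+	 * Unless forced, only fast-forward updates are allowed: the old
+	 * tip must be an ancestor of the new one.
+	 */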
+ if (!force_update && !is_null_oid(&old_oid)) {
+ struct commit *old_cmit, *new_cmit;
+
+ old_cmit = lookup_commit_reference_gently(the_repository,
+ &old_oid, 0);
+ new_cmit = lookup_commit_reference_gently(the_repository,
+ &b->oid, 0);
+ if (!old_cmit || !new_cmit)
+ return error("Branch %s is missing commits.", b->name);
+
+ if (!in_merge_bases(old_cmit, new_cmit)) {
+ warning("Not updating %s"
+ " (new tip %s does not contain %s)",
+ b->name, oid_to_hex(&b->oid),
+ oid_to_hex(&old_oid));
+ return -1;
+ }
+ }
+ transaction = ref_transaction_begin(&err);
+ if (!transaction ||
+ ref_transaction_update(transaction, b->name, &b->oid, &old_oid,
+ 0, msg, &err) ||
+ ref_transaction_commit(transaction, &err)) {
+ ref_transaction_free(transaction);
+ error("%s", err.buf);
+ strbuf_release(&err);
+ return -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&err);
+ return 0;
+}
+
+static void dump_branches(void)
+{
+ unsigned int i;
+ struct branch *b;
+
+ for (i = 0; i < branch_table_sz; i++) {
+ for (b = branch_table[i]; b; b = b->table_next_branch)
+ failure |= update_branch(b);
+ }
+}
+
+static void dump_tags(void)
+{
+ static const char *msg = "fast-import";
+ struct tag *t;
+ struct strbuf ref_name = STRBUF_INIT;
+ struct strbuf err = STRBUF_INIT;
+ struct ref_transaction *transaction;
+
+ transaction = ref_transaction_begin(&err);
+ if (!transaction) {
+ failure |= error("%s", err.buf);
+ goto cleanup;
+ }
+ for (t = first_tag; t; t = t->next_tag) {
+ strbuf_reset(&ref_name);
+ strbuf_addf(&ref_name, "refs/tags/%s", t->name);
+
+ if (ref_transaction_update(transaction, ref_name.buf,
+ &t->oid, NULL, 0, msg, &err)) {
+ failure |= error("%s", err.buf);
+ goto cleanup;
+ }
+ }
+ if (ref_transaction_commit(transaction, &err))
+ failure |= error("%s", err.buf);
+
+ cleanup:
+ ref_transaction_free(transaction);
+ strbuf_release(&ref_name);
+ strbuf_release(&err);
+}
+
+static void dump_marks(void)
+{
+ struct lock_file mark_lock = LOCK_INIT;
+ FILE *f;
+
+ if (!export_marks_file || (import_marks_file && !import_marks_file_done))
+ return;
+
+ if (safe_create_leading_directories_const(export_marks_file)) {
+ failure |= error_errno("unable to create leading directories of %s",
+ export_marks_file);
+ return;
+ }
+
+ if (hold_lock_file_for_update(&mark_lock, export_marks_file, 0) < 0) {
+ failure |= error_errno("Unable to write marks file %s",
+ export_marks_file);
+ return;
+ }
+
+ f = fdopen_lock_file(&mark_lock, "w");
+ if (!f) {
+ int saved_errno = errno;
+ rollback_lock_file(&mark_lock);
+ failure |= error("Unable to write marks file %s: %s",
+ export_marks_file, strerror(saved_errno));
+ return;
+ }
+
+ for_each_mark(marks, 0, dump_marks_fn, f);
+ if (commit_lock_file(&mark_lock)) {
+ failure |= error_errno("Unable to write file %s",
+ export_marks_file);
+ return;
+ }
+}
+
+static void insert_object_entry(struct mark_set *s, struct object_id *oid, uintmax_t mark)
+{
+ struct object_entry *e;
+ e = find_object(oid);
+ if (!e) {
+ enum object_type type = oid_object_info(the_repository,
+ oid, NULL);
+ if (type < 0)
+ die("object not found: %s", oid_to_hex(oid));
+ e = insert_object(oid);
+ e->type = type;
+ e->pack_id = MAX_PACK_ID;
+ e->idx.offset = 1; /* just not zero! */
+ }
+ insert_mark(s, mark, e);
+}
+
+static void insert_oid_entry(struct mark_set *s, struct object_id *oid, uintmax_t mark)
+{
+ insert_mark(s, mark, xmemdupz(oid, sizeof(*oid)));
+}
+
+static void read_mark_file(struct mark_set *s, FILE *f, mark_set_inserter_t inserter)
+{
+ char line[512];
+ while (fgets(line, sizeof(line), f)) {
+ uintmax_t mark;
+ char *end;
+ struct object_id oid;
+
+ /* Ensure SHA-1 objects are padded with zeros. */
+ memset(oid.hash, 0, sizeof(oid.hash));
+
+ end = strchr(line, '\n');
+ if (line[0] != ':' || !end)
+ die("corrupt mark line: %s", line);
+ *end = 0;
+ mark = strtoumax(line + 1, &end, 10);
+ if (!mark || end == line + 1
+ || *end != ' '
+ || get_oid_hex_any(end + 1, &oid) == GIT_HASH_UNKNOWN)
+ die("corrupt mark line: %s", line);
+ inserter(s, &oid, mark);
+ }
+}
+
+static void read_marks(void)
+{
+ FILE *f = fopen(import_marks_file, "r");
+ if (f)
+ ;
+ else if (import_marks_file_ignore_missing && errno == ENOENT)
+ goto done; /* Marks file does not exist */
+ else
+ die_errno("cannot read '%s'", import_marks_file);
+ read_mark_file(marks, f, insert_object_entry);
+ fclose(f);
+done:
+ import_marks_file_done = 1;
+}
+
+
+static int read_next_command(void)
+{
+ static int stdin_eof = 0;
+
+ if (stdin_eof) {
+ unread_command_buf = 0;
+ return EOF;
+ }
+
+ for (;;) {
+ if (unread_command_buf) {
+ unread_command_buf = 0;
+ } else {
+ struct recent_command *rc;
+
+ stdin_eof = strbuf_getline_lf(&command_buf, stdin);
+ if (stdin_eof)
+ return EOF;
+
+ if (!seen_data_command
+ && !starts_with(command_buf.buf, "feature ")
+ && !starts_with(command_buf.buf, "option ")) {
+ parse_argv();
+ }
+
+ rc = rc_free;
+ if (rc)
+ rc_free = rc->next;
+ else {
+ rc = cmd_hist.next;
+ cmd_hist.next = rc->next;
+ cmd_hist.next->prev = &cmd_hist;
+ free(rc->buf);
+ }
+
+ rc->buf = xstrdup(command_buf.buf);
+ rc->prev = cmd_tail;
+ rc->next = cmd_hist.prev;
+ rc->prev->next = rc;
+ cmd_tail = rc;
+ }
+ if (command_buf.buf[0] == '#')
+ continue;
+ return 0;
+ }
+}
+
+static void skip_optional_lf(void)
+{
+ int term_char = fgetc(stdin);
+ if (term_char != '\n' && term_char != EOF)
+ ungetc(term_char, stdin);
+}
+
+static void parse_mark(void)
+{
+ const char *v;
+ if (skip_prefix(command_buf.buf, "mark :", &v)) {
+ next_mark = strtoumax(v, NULL, 10);
+ read_next_command();
+ }
+ else
+ next_mark = 0;
+}
+
+static void parse_original_identifier(void)
+{
+ const char *v;
+ if (skip_prefix(command_buf.buf, "original-oid ", &v))
+ read_next_command();
+}
+
+static int parse_data(struct strbuf *sb, uintmax_t limit, uintmax_t *len_res)
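+/*
+ * Read a 'data' payload, either delimited ("data <<EOF") or counted
+ * ("data <n>").  Returns 1 with the payload collected in sb, or 0 when
+ * a counted payload exceeds 'limit'; in that case *len_res is set to
+ * the length and the bytes are left on stdin for the caller to stream.
+ */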
+{
+ const char *data;
+ strbuf_reset(sb);
+
+ if (!skip_prefix(command_buf.buf, "data ", &data))
+ die("Expected 'data n' command, found: %s", command_buf.buf);
+
+ if (skip_prefix(data, "<<", &data)) {
+ char *term = xstrdup(data);
+ size_t term_len = command_buf.len - (data - command_buf.buf);
+
+ for (;;) {
+ if (strbuf_getline_lf(&command_buf, stdin) == EOF)
+ die("EOF in data (terminator '%s' not found)", term);
+ if (term_len == command_buf.len
+ && !strcmp(term, command_buf.buf))
+ break;
+ strbuf_addbuf(sb, &command_buf);
+ strbuf_addch(sb, '\n');
+ }
+ free(term);
+ }
+ else {
+ uintmax_t len = strtoumax(data, NULL, 10);
+ size_t n = 0, length = (size_t)len;
+
+ if (limit && limit < len) {
+ *len_res = len;
+ return 0;
+ }
+ if (length < len)
+ die("data is too large to use in this context");
+
+ while (n < length) {
+ size_t s = strbuf_fread(sb, length - n, stdin);
+ if (!s && feof(stdin))
+ die("EOF in data (%lu bytes remaining)",
+ (unsigned long)(length - n));
+ n += s;
+ }
+ }
+
+ skip_optional_lf();
+ return 1;
+}
+
+static int validate_raw_date(const char *src, struct strbuf *result, int strict)
+{
+ const char *orig_src = src;
+ char *endp;
+ unsigned long num;
+
+ errno = 0;
+
+ num = strtoul(src, &endp, 10);
+ /*
+ * NEEDSWORK: perhaps check for reasonable values? For example, we
+ * could error on values representing times more than a
+ * day in the future.
+ */
+ if (errno || endp == src || *endp != ' ')
+ return -1;
+
+ src = endp + 1;
+ if (*src != '-' && *src != '+')
+ return -1;
+
+ num = strtoul(src + 1, &endp, 10);
+ /*
+ * NEEDSWORK: check for brokenness other than num > 1400, such as
+ * (num % 100) >= 60, or ((num % 100) % 15) != 0 ?
+ */
+ if (errno || endp == src + 1 || *endp || /* did not parse */
+ (strict && (1400 < num)) /* parsed a broken timezone */
+ )
+ return -1;
+
+ strbuf_addstr(result, orig_src);
+ return 0;
+}
+
+static char *parse_ident(const char *buf)
+{
+ const char *ltgt;
+ size_t name_len;
+ struct strbuf ident = STRBUF_INIT;
+
+ /* ensure there is a space delimiter even if there is no name */
+ if (*buf == '<')
+ --buf;
+
+ ltgt = buf + strcspn(buf, "<>");
+ if (*ltgt != '<')
+ die("Missing < in ident string: %s", buf);
+ if (ltgt != buf && ltgt[-1] != ' ')
+ die("Missing space before < in ident string: %s", buf);
+ ltgt = ltgt + 1 + strcspn(ltgt + 1, "<>");
+ if (*ltgt != '>')
+ die("Missing > in ident string: %s", buf);
+ ltgt++;
+ if (*ltgt != ' ')
+ die("Missing space after > in ident string: %s", buf);
+ ltgt++;
+ name_len = ltgt - buf;
+ strbuf_add(&ident, buf, name_len);
+
+ switch (whenspec) {
+ case WHENSPEC_RAW:
+ if (validate_raw_date(ltgt, &ident, 1) < 0)
+ die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
+ break;
+ case WHENSPEC_RAW_PERMISSIVE:
+ if (validate_raw_date(ltgt, &ident, 0) < 0)
+ die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
+ break;
+ case WHENSPEC_RFC2822:
+ if (parse_date(ltgt, &ident) < 0)
+ die("Invalid rfc2822 date \"%s\" in ident: %s", ltgt, buf);
+ break;
+ case WHENSPEC_NOW:
+ if (strcmp("now", ltgt))
+ die("Date in ident must be 'now': %s", buf);
+ datestamp(&ident);
+ break;
+ }
+
+ return strbuf_detach(&ident, NULL);
+}
+
+static void parse_and_store_blob(
+ struct last_object *last,
+ struct object_id *oidout,
+ uintmax_t mark)
+{
+ static struct strbuf buf = STRBUF_INIT;
+ uintmax_t len;
+
+ if (parse_data(&buf, big_file_threshold, &len))
+ store_object(OBJ_BLOB, &buf, last, oidout, mark);
+ else {
+ if (last) {
+ strbuf_release(&last->data);
+ last->offset = 0;
+ last->depth = 0;
+ }
+ stream_blob(len, oidout, mark);
+ skip_optional_lf();
+ }
+}
+
+static void parse_new_blob(void)
+{
+ read_next_command();
+ parse_mark();
+ parse_original_identifier();
+ parse_and_store_blob(&last_blob, NULL, next_mark);
+}
+
+static void unload_one_branch(void)
+{
+ while (cur_active_branches
+ && cur_active_branches >= max_active_branches) {
+ uintmax_t min_commit = ULONG_MAX;
+ struct branch *e, *l = NULL, *p = NULL;
+
+ for (e = active_branches; e; e = e->active_next_branch) {
+ if (e->last_commit < min_commit) {
+ p = l;
+ min_commit = e->last_commit;
+ }
+ l = e;
+ }
+
+ if (p) {
+ e = p->active_next_branch;
+ p->active_next_branch = e->active_next_branch;
+ } else {
+ e = active_branches;
+ active_branches = e->active_next_branch;
+ }
+ e->active = 0;
+ e->active_next_branch = NULL;
+ if (e->branch_tree.tree) {
+ release_tree_content_recursive(e->branch_tree.tree);
+ e->branch_tree.tree = NULL;
+ }
+ cur_active_branches--;
+ }
+}
+
+static void load_branch(struct branch *b)
+{
+ load_tree(&b->branch_tree);
+ if (!b->active) {
+ b->active = 1;
+ b->active_next_branch = active_branches;
+ active_branches = b;
+ cur_active_branches++;
+ branch_load_count++;
+ }
+}
+
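+/*
+ * One level of fanout per factor of 256 notes: 0 levels below 256
+ * notes, 1 level below 65536, and so on.
+ */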
+static unsigned char convert_num_notes_to_fanout(uintmax_t num_notes)
+{
+ unsigned char fanout = 0;
+ while ((num_notes >>= 8))
+ fanout++;
+ return fanout;
+}
+
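+/*
+ * Turn a hex object name into a notes path with the given fanout,
+ * e.g. fanout 2 maps "abcdef..." to "ab/cd/ef...".
+ */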
+static void construct_path_with_fanout(const char *hex_sha1,
+ unsigned char fanout, char *path)
+{
+ unsigned int i = 0, j = 0;
+ if (fanout >= the_hash_algo->rawsz)
+ die("Too large fanout (%u)", fanout);
+ while (fanout) {
+ path[i++] = hex_sha1[j++];
+ path[i++] = hex_sha1[j++];
+ path[i++] = '/';
+ fanout--;
+ }
+ memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+ path[i + the_hash_algo->hexsz - j] = '\0';
+}
+
+static uintmax_t do_change_note_fanout(
+ struct tree_entry *orig_root, struct tree_entry *root,
+ char *hex_oid, unsigned int hex_oid_len,
+ char *fullpath, unsigned int fullpath_len,
+ unsigned char fanout)
+{
+ struct tree_content *t;
+ struct tree_entry *e, leaf;
+ unsigned int i, tmp_hex_oid_len, tmp_fullpath_len;
+ uintmax_t num_notes = 0;
+ struct object_id oid;
+ /* hex oid + '/' between each pair of hex digits + NUL */
+ char realpath[GIT_MAX_HEXSZ + ((GIT_MAX_HEXSZ / 2) - 1) + 1];
+ const unsigned hexsz = the_hash_algo->hexsz;
+
+ if (!root->tree)
+ load_tree(root);
+ t = root->tree;
+
+ for (i = 0; t && i < t->entry_count; i++) {
+ e = t->entries[i];
+ tmp_hex_oid_len = hex_oid_len + e->name->str_len;
+ tmp_fullpath_len = fullpath_len;
+
+ /*
+		 * We're interested in EITHER existing note entries (entries
+		 * with exactly hexsz hex chars in path, not including directory
+		 * separators), OR directory entries that may contain note
+		 * entries (with < hexsz hex chars in path).
+ * Also, each path component in a note entry must be a multiple
+ * of 2 chars.
+ */
+ if (!e->versions[1].mode ||
+ tmp_hex_oid_len > hexsz ||
+ e->name->str_len % 2)
+ continue;
+
+ /* This _may_ be a note entry, or a subdir containing notes */
+ memcpy(hex_oid + hex_oid_len, e->name->str_dat,
+ e->name->str_len);
+ if (tmp_fullpath_len)
+ fullpath[tmp_fullpath_len++] = '/';
+ memcpy(fullpath + tmp_fullpath_len, e->name->str_dat,
+ e->name->str_len);
+ tmp_fullpath_len += e->name->str_len;
+ fullpath[tmp_fullpath_len] = '\0';
+
+ if (tmp_hex_oid_len == hexsz && !get_oid_hex(hex_oid, &oid)) {
+ /* This is a note entry */
+ if (fanout == 0xff) {
+ /* Counting mode, no rename */
+ num_notes++;
+ continue;
+ }
+ construct_path_with_fanout(hex_oid, fanout, realpath);
+ if (!strcmp(fullpath, realpath)) {
+ /* Note entry is in correct location */
+ num_notes++;
+ continue;
+ }
+
+ /* Rename fullpath to realpath */
+ if (!tree_content_remove(orig_root, fullpath, &leaf, 0))
+ die("Failed to remove path %s", fullpath);
+ tree_content_set(orig_root, realpath,
+ &leaf.versions[1].oid,
+ leaf.versions[1].mode,
+ leaf.tree);
+ } else if (S_ISDIR(e->versions[1].mode)) {
+ /* This is a subdir that may contain note entries */
+ num_notes += do_change_note_fanout(orig_root, e,
+ hex_oid, tmp_hex_oid_len,
+ fullpath, tmp_fullpath_len, fanout);
+ }
+
+ /* The above may have reallocated the current tree_content */
+ t = root->tree;
+ }
+ return num_notes;
+}
+
+static uintmax_t change_note_fanout(struct tree_entry *root,
+ unsigned char fanout)
+{
+ /*
+ * The size of path is due to one slash between every two hex digits,
+ * plus the terminating NUL. Note that there is no slash at the end, so
+ * the number of slashes is one less than half the number of hex
+ * characters.
+ */
+ char hex_oid[GIT_MAX_HEXSZ], path[GIT_MAX_HEXSZ + (GIT_MAX_HEXSZ / 2) - 1 + 1];
+ return do_change_note_fanout(root, root, hex_oid, 0, path, 0, fanout);
+}
+
+static int parse_mapped_oid_hex(const char *hex, struct object_id *oid, const char **end)
+{
+ int algo;
+ khiter_t it;
+
+ /* Make SHA-1 object IDs have all-zero padding. */
+ memset(oid->hash, 0, sizeof(oid->hash));
+
+ algo = parse_oid_hex_any(hex, oid, end);
+ if (algo == GIT_HASH_UNKNOWN)
+ return -1;
+
+ it = kh_get_oid_map(sub_oid_map, *oid);
+ /* No such object? */
+ if (it == kh_end(sub_oid_map)) {
+ /* If we're using the same algorithm, pass it through. */
+ if (hash_algos[algo].format_id == the_hash_algo->format_id)
+ return 0;
+ return -1;
+ }
+ oidcpy(oid, kh_value(sub_oid_map, it));
+ return 0;
+}
+
+/*
+ * Given a pointer into a string, parse a mark reference:
+ *
+ * idnum ::= ':' bigint;
+ *
+ * Return the first character after the value in *endptr.
+ *
+ * Complain if the following character is not what is expected,
+ * either a space or end of the string.
+ */
+static uintmax_t parse_mark_ref(const char *p, char **endptr)
+{
+ uintmax_t mark;
+
+ assert(*p == ':');
+ p++;
+ mark = strtoumax(p, endptr, 10);
+ if (*endptr == p)
+ die("No value after ':' in mark: %s", command_buf.buf);
+ return mark;
+}
+
+/*
+ * Parse the mark reference, and complain if this is not the end of
+ * the string.
+ */
+static uintmax_t parse_mark_ref_eol(const char *p)
+{
+ char *end;
+ uintmax_t mark;
+
+ mark = parse_mark_ref(p, &end);
+ if (*end != '\0')
+ die("Garbage after mark: %s", command_buf.buf);
+ return mark;
+}
+
+/*
+ * Parse the mark reference, demanding a trailing space.  Update *p to
+ * point to the first character after the space.
+ */
+static uintmax_t parse_mark_ref_space(const char **p)
+{
+ uintmax_t mark;
+ char *end;
+
+ mark = parse_mark_ref(*p, &end);
+ if (*end++ != ' ')
+ die("Missing space after mark: %s", command_buf.buf);
+ *p = end;
+ return mark;
+}
+
+static void file_change_m(const char *p, struct branch *b)
+{
+ static struct strbuf uq = STRBUF_INIT;
+ const char *endp;
+ struct object_entry *oe;
+ struct object_id oid;
+ uint16_t mode, inline_data = 0;
+
+ p = get_mode(p, &mode);
+ if (!p)
+ die("Corrupt mode: %s", command_buf.buf);
+ switch (mode) {
+ case 0644:
+ case 0755:
+ mode |= S_IFREG;
+ case S_IFREG | 0644:
+ case S_IFREG | 0755:
+ case S_IFLNK:
+ case S_IFDIR:
+ case S_IFGITLINK:
+ /* ok */
+ break;
+ default:
+ die("Corrupt mode: %s", command_buf.buf);
+ }
+
+ if (*p == ':') {
+ oe = find_mark(marks, parse_mark_ref_space(&p));
+ oidcpy(&oid, &oe->idx.oid);
+ } else if (skip_prefix(p, "inline ", &p)) {
+ inline_data = 1;
+ oe = NULL; /* not used with inline_data, but makes gcc happy */
+ } else {
+ if (parse_mapped_oid_hex(p, &oid, &p))
+ die("Invalid dataref: %s", command_buf.buf);
+ oe = find_object(&oid);
+ if (*p++ != ' ')
+ die("Missing space after SHA1: %s", command_buf.buf);
+ }
+
+ strbuf_reset(&uq);
+ if (!unquote_c_style(&uq, p, &endp)) {
+ if (*endp)
+ die("Garbage after path in: %s", command_buf.buf);
+ p = uq.buf;
+ }
+
+ /* Git does not track empty, non-toplevel directories. */
+ if (S_ISDIR(mode) && is_empty_tree_oid(&oid) && *p) {
+ tree_content_remove(&b->branch_tree, p, NULL, 0);
+ return;
+ }
+
+ if (S_ISGITLINK(mode)) {
+ if (inline_data)
+ die("Git links cannot be specified 'inline': %s",
+ command_buf.buf);
+ else if (oe) {
+ if (oe->type != OBJ_COMMIT)
+ die("Not a commit (actually a %s): %s",
+ type_name(oe->type), command_buf.buf);
+ }
+ /*
+	 * Accept the sha1 without checking; it is expected to be in
+ * another repository.
+ */
+ } else if (inline_data) {
+ if (S_ISDIR(mode))
+ die("Directories cannot be specified 'inline': %s",
+ command_buf.buf);
+ if (p != uq.buf) {
+ strbuf_addstr(&uq, p);
+ p = uq.buf;
+ }
+ while (read_next_command() != EOF) {
+ const char *v;
+ if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else {
+ parse_and_store_blob(&last_blob, &oid, 0);
+ break;
+ }
+ }
+ } else {
+ enum object_type expected = S_ISDIR(mode) ?
+ OBJ_TREE: OBJ_BLOB;
+ enum object_type type = oe ? oe->type :
+ oid_object_info(the_repository, &oid,
+ NULL);
+ if (type < 0)
+ die("%s not found: %s",
+ S_ISDIR(mode) ? "Tree" : "Blob",
+ command_buf.buf);
+ if (type != expected)
+ die("Not a %s (actually a %s): %s",
+ type_name(expected), type_name(type),
+ command_buf.buf);
+ }
+
+ if (!*p) {
+ tree_content_replace(&b->branch_tree, &oid, mode, NULL);
+ return;
+ }
+ tree_content_set(&b->branch_tree, p, &oid, mode, NULL);
+}
+
+static void file_change_d(const char *p, struct branch *b)
+{
+ static struct strbuf uq = STRBUF_INIT;
+ const char *endp;
+
+ strbuf_reset(&uq);
+ if (!unquote_c_style(&uq, p, &endp)) {
+ if (*endp)
+ die("Garbage after path in: %s", command_buf.buf);
+ p = uq.buf;
+ }
+ tree_content_remove(&b->branch_tree, p, NULL, 1);
+}
+
+static void file_change_cr(const char *s, struct branch *b, int rename)
+{
+ const char *d;
+ static struct strbuf s_uq = STRBUF_INIT;
+ static struct strbuf d_uq = STRBUF_INIT;
+ const char *endp;
+ struct tree_entry leaf;
+
+ strbuf_reset(&s_uq);
+ if (!unquote_c_style(&s_uq, s, &endp)) {
+ if (*endp != ' ')
+ die("Missing space after source: %s", command_buf.buf);
+ } else {
+ endp = strchr(s, ' ');
+ if (!endp)
+ die("Missing space after source: %s", command_buf.buf);
+ strbuf_add(&s_uq, s, endp - s);
+ }
+ s = s_uq.buf;
+
+ endp++;
+ if (!*endp)
+ die("Missing dest: %s", command_buf.buf);
+
+ d = endp;
+ strbuf_reset(&d_uq);
+ if (!unquote_c_style(&d_uq, d, &endp)) {
+ if (*endp)
+ die("Garbage after dest in: %s", command_buf.buf);
+ d = d_uq.buf;
+ }
+
+ memset(&leaf, 0, sizeof(leaf));
+ if (rename)
+ tree_content_remove(&b->branch_tree, s, &leaf, 1);
+ else
+ tree_content_get(&b->branch_tree, s, &leaf, 1);
+ if (!leaf.versions[1].mode)
+ die("Path %s not in branch", s);
+ if (!*d) { /* C "path/to/subdir" "" */
+ tree_content_replace(&b->branch_tree,
+ &leaf.versions[1].oid,
+ leaf.versions[1].mode,
+ leaf.tree);
+ return;
+ }
+ tree_content_set(&b->branch_tree, d,
+ &leaf.versions[1].oid,
+ leaf.versions[1].mode,
+ leaf.tree);
+}
+
+static void note_change_n(const char *p, struct branch *b, unsigned char *old_fanout)
+{
+ static struct strbuf uq = STRBUF_INIT;
+ struct object_entry *oe;
+ struct branch *s;
+ struct object_id oid, commit_oid;
+ char path[GIT_MAX_RAWSZ * 3];
+ uint16_t inline_data = 0;
+ unsigned char new_fanout;
+
+ /*
+ * When loading a branch, we don't traverse its tree to count the real
+ * number of notes (too expensive to do this for all non-note refs).
+ * This means that recently loaded notes refs might incorrectly have
+ * b->num_notes == 0, and consequently, old_fanout might be wrong.
+ *
+ * Fix this by traversing the tree and counting the number of notes
+ * when b->num_notes == 0. If the notes tree is truly empty, the
+ * calculation should not take long.
+ */
+ if (b->num_notes == 0 && *old_fanout == 0) {
+ /* Invoke change_note_fanout() in "counting mode". */
+ b->num_notes = change_note_fanout(&b->branch_tree, 0xff);
+ *old_fanout = convert_num_notes_to_fanout(b->num_notes);
+ }
+
+ /* Now parse the notemodify command. */
+ /* <dataref> or 'inline' */
+ if (*p == ':') {
+ oe = find_mark(marks, parse_mark_ref_space(&p));
+ oidcpy(&oid, &oe->idx.oid);
+ } else if (skip_prefix(p, "inline ", &p)) {
+ inline_data = 1;
+ oe = NULL; /* not used with inline_data, but makes gcc happy */
+ } else {
+ if (parse_mapped_oid_hex(p, &oid, &p))
+ die("Invalid dataref: %s", command_buf.buf);
+ oe = find_object(&oid);
+ if (*p++ != ' ')
+ die("Missing space after SHA1: %s", command_buf.buf);
+ }
+
+ /* <commit-ish> */
+ s = lookup_branch(p);
+ if (s) {
+ if (is_null_oid(&s->oid))
+ die("Can't add a note on empty branch.");
+ oidcpy(&commit_oid, &s->oid);
+ } else if (*p == ':') {
+ uintmax_t commit_mark = parse_mark_ref_eol(p);
+ struct object_entry *commit_oe = find_mark(marks, commit_mark);
+ if (commit_oe->type != OBJ_COMMIT)
+ die("Mark :%" PRIuMAX " not a commit", commit_mark);
+ oidcpy(&commit_oid, &commit_oe->idx.oid);
+ } else if (!get_oid(p, &commit_oid)) {
+ unsigned long size;
+ char *buf = read_object_with_reference(the_repository,
+ &commit_oid,
+ commit_type, &size,
+ &commit_oid);
+ if (!buf || size < the_hash_algo->hexsz + 6)
+ die("Not a valid commit: %s", p);
+ free(buf);
+ } else
+ die("Invalid ref name or SHA1 expression: %s", p);
+
+ if (inline_data) {
+ if (p != uq.buf) {
+ strbuf_addstr(&uq, p);
+ p = uq.buf;
+ }
+ read_next_command();
+ parse_and_store_blob(&last_blob, &oid, 0);
+ } else if (oe) {
+ if (oe->type != OBJ_BLOB)
+ die("Not a blob (actually a %s): %s",
+ type_name(oe->type), command_buf.buf);
+ } else if (!is_null_oid(&oid)) {
+ enum object_type type = oid_object_info(the_repository, &oid,
+ NULL);
+ if (type < 0)
+ die("Blob not found: %s", command_buf.buf);
+ if (type != OBJ_BLOB)
+ die("Not a blob (actually a %s): %s",
+ type_name(type), command_buf.buf);
+ }
+
+ construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
+ if (tree_content_remove(&b->branch_tree, path, NULL, 0))
+ b->num_notes--;
+
+ if (is_null_oid(&oid))
+ return; /* nothing to insert */
+
+ b->num_notes++;
+ new_fanout = convert_num_notes_to_fanout(b->num_notes);
+ construct_path_with_fanout(oid_to_hex(&commit_oid), new_fanout, path);
+ tree_content_set(&b->branch_tree, path, &oid, S_IFREG | 0644, NULL);
+}
+
+static void file_change_deleteall(struct branch *b)
+{
+ release_tree_content_recursive(b->branch_tree.tree);
+ oidclr(&b->branch_tree.versions[0].oid);
+ oidclr(&b->branch_tree.versions[1].oid);
+ load_tree(&b->branch_tree);
+ b->num_notes = 0;
+}
+
+static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
+{
+ if (!buf || size < the_hash_algo->hexsz + 6)
+ die("Not a valid commit: %s", oid_to_hex(&b->oid));
+ if (memcmp("tree ", buf, 5)
+ || get_oid_hex(buf + 5, &b->branch_tree.versions[1].oid))
+ die("The commit %s is corrupt", oid_to_hex(&b->oid));
+ oidcpy(&b->branch_tree.versions[0].oid,
+ &b->branch_tree.versions[1].oid);
+}
+
+static void parse_from_existing(struct branch *b)
+{
+ if (is_null_oid(&b->oid)) {
+ oidclr(&b->branch_tree.versions[0].oid);
+ oidclr(&b->branch_tree.versions[1].oid);
+ } else {
+ unsigned long size;
+ char *buf;
+
+ buf = read_object_with_reference(the_repository,
+ &b->oid, commit_type, &size,
+ &b->oid);
+ parse_from_commit(b, buf, size);
+ free(buf);
+ }
+}
+
+static int parse_objectish(struct branch *b, const char *objectish)
+{
+ struct branch *s;
+ struct object_id oid;
+
+ oidcpy(&oid, &b->branch_tree.versions[1].oid);
+
+ s = lookup_branch(objectish);
+ if (b == s)
+ die("Can't create a branch from itself: %s", b->name);
+ else if (s) {
+ struct object_id *t = &s->branch_tree.versions[1].oid;
+ oidcpy(&b->oid, &s->oid);
+ oidcpy(&b->branch_tree.versions[0].oid, t);
+ oidcpy(&b->branch_tree.versions[1].oid, t);
+ } else if (*objectish == ':') {
+ uintmax_t idnum = parse_mark_ref_eol(objectish);
+ struct object_entry *oe = find_mark(marks, idnum);
+ if (oe->type != OBJ_COMMIT)
+ die("Mark :%" PRIuMAX " not a commit", idnum);
+ if (!oideq(&b->oid, &oe->idx.oid)) {
+ oidcpy(&b->oid, &oe->idx.oid);
+ if (oe->pack_id != MAX_PACK_ID) {
+ unsigned long size;
+ char *buf = gfi_unpack_entry(oe, &size);
+ parse_from_commit(b, buf, size);
+ free(buf);
+ } else
+ parse_from_existing(b);
+ }
+ } else if (!get_oid(objectish, &b->oid)) {
+ parse_from_existing(b);
+ if (is_null_oid(&b->oid))
+ b->delete = 1;
+ }
+ else
+ die("Invalid ref name or SHA1 expression: %s", objectish);
+
+ if (b->branch_tree.tree && !oideq(&oid, &b->branch_tree.versions[1].oid)) {
+ release_tree_content_recursive(b->branch_tree.tree);
+ b->branch_tree.tree = NULL;
+ }
+
+ read_next_command();
+ return 1;
+}
+
+static int parse_from(struct branch *b)
+{
+ const char *from;
+
+ if (!skip_prefix(command_buf.buf, "from ", &from))
+ return 0;
+
+ return parse_objectish(b, from);
+}
+
+static int parse_objectish_with_prefix(struct branch *b, const char *prefix)
+{
+ const char *base;
+
+ if (!skip_prefix(command_buf.buf, prefix, &base))
+ return 0;
+
+ return parse_objectish(b, base);
+}
+
+static struct hash_list *parse_merge(unsigned int *count)
+{
+ struct hash_list *list = NULL, **tail = &list, *n;
+ const char *from;
+ struct branch *s;
+
+ *count = 0;
+ while (skip_prefix(command_buf.buf, "merge ", &from)) {
+ n = xmalloc(sizeof(*n));
+ s = lookup_branch(from);
+ if (s)
+ oidcpy(&n->oid, &s->oid);
+ else if (*from == ':') {
+ uintmax_t idnum = parse_mark_ref_eol(from);
+ struct object_entry *oe = find_mark(marks, idnum);
+ if (oe->type != OBJ_COMMIT)
+ die("Mark :%" PRIuMAX " not a commit", idnum);
+ oidcpy(&n->oid, &oe->idx.oid);
+ } else if (!get_oid(from, &n->oid)) {
+ unsigned long size;
+ char *buf = read_object_with_reference(the_repository,
+ &n->oid,
+ commit_type,
+ &size, &n->oid);
+ if (!buf || size < the_hash_algo->hexsz + 6)
+ die("Not a valid commit: %s", from);
+ free(buf);
+ } else
+ die("Invalid ref name or SHA1 expression: %s", from);
+
+ n->next = NULL;
+ *tail = n;
+ tail = &n->next;
+
+ (*count)++;
+ read_next_command();
+ }
+ return list;
+}
+
+static void parse_new_commit(const char *arg)
+{
+ static struct strbuf msg = STRBUF_INIT;
+ struct branch *b;
+ char *author = NULL;
+ char *committer = NULL;
+ char *encoding = NULL;
+ struct hash_list *merge_list = NULL;
+ unsigned int merge_count;
+ unsigned char prev_fanout, new_fanout;
+ const char *v;
+
+ b = lookup_branch(arg);
+ if (!b)
+ b = new_branch(arg);
+
+ read_next_command();
+ parse_mark();
+ parse_original_identifier();
+ if (skip_prefix(command_buf.buf, "author ", &v)) {
+ author = parse_ident(v);
+ read_next_command();
+ }
+ if (skip_prefix(command_buf.buf, "committer ", &v)) {
+ committer = parse_ident(v);
+ read_next_command();
+ }
+ if (!committer)
+ die("Expected committer but didn't get one");
+ if (skip_prefix(command_buf.buf, "encoding ", &v)) {
+ encoding = xstrdup(v);
+ read_next_command();
+ }
+ parse_data(&msg, 0, NULL);
+ read_next_command();
+ parse_from(b);
+ merge_list = parse_merge(&merge_count);
+
+ /* ensure the branch is active/loaded */
+ if (!b->branch_tree.tree || !max_active_branches) {
+ unload_one_branch();
+ load_branch(b);
+ }
+
+ prev_fanout = convert_num_notes_to_fanout(b->num_notes);
+
+ /* file_change* */
+ while (command_buf.len > 0) {
+ if (skip_prefix(command_buf.buf, "M ", &v))
+ file_change_m(v, b);
+ else if (skip_prefix(command_buf.buf, "D ", &v))
+ file_change_d(v, b);
+ else if (skip_prefix(command_buf.buf, "R ", &v))
+ file_change_cr(v, b, 1);
+ else if (skip_prefix(command_buf.buf, "C ", &v))
+ file_change_cr(v, b, 0);
+ else if (skip_prefix(command_buf.buf, "N ", &v))
+ note_change_n(v, b, &prev_fanout);
+ else if (!strcmp("deleteall", command_buf.buf))
+ file_change_deleteall(b);
+ else if (skip_prefix(command_buf.buf, "ls ", &v))
+ parse_ls(v, b);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else {
+ unread_command_buf = 1;
+ break;
+ }
+ if (read_next_command() == EOF)
+ break;
+ }
+
+ new_fanout = convert_num_notes_to_fanout(b->num_notes);
+ if (new_fanout != prev_fanout)
+ b->num_notes = change_note_fanout(&b->branch_tree, new_fanout);
+
+ /* build the tree and the commit */
+ store_tree(&b->branch_tree);
+ oidcpy(&b->branch_tree.versions[0].oid,
+ &b->branch_tree.versions[1].oid);
+
+ strbuf_reset(&new_data);
+ strbuf_addf(&new_data, "tree %s\n",
+ oid_to_hex(&b->branch_tree.versions[1].oid));
+ if (!is_null_oid(&b->oid))
+ strbuf_addf(&new_data, "parent %s\n",
+ oid_to_hex(&b->oid));
+ while (merge_list) {
+ struct hash_list *next = merge_list->next;
+ strbuf_addf(&new_data, "parent %s\n",
+ oid_to_hex(&merge_list->oid));
+ free(merge_list);
+ merge_list = next;
+ }
+ strbuf_addf(&new_data,
+ "author %s\n"
+ "committer %s\n",
+ author ? author : committer, committer);
+ if (encoding)
+ strbuf_addf(&new_data,
+ "encoding %s\n",
+ encoding);
+ strbuf_addch(&new_data, '\n');
+ strbuf_addbuf(&new_data, &msg);
+ free(author);
+ free(committer);
+ free(encoding);
+
+ if (!store_object(OBJ_COMMIT, &new_data, NULL, &b->oid, next_mark))
+ b->pack_id = pack_id;
+ b->last_commit = object_count_by_type[OBJ_COMMIT];
+}
+
+static void parse_new_tag(const char *arg)
+{
+ static struct strbuf msg = STRBUF_INIT;
+ const char *from;
+ char *tagger;
+ struct branch *s;
+ struct tag *t;
+ uintmax_t from_mark = 0;
+ struct object_id oid;
+ enum object_type type;
+ const char *v;
+
+ t = mem_pool_alloc(&fi_mem_pool, sizeof(struct tag));
+ memset(t, 0, sizeof(struct tag));
+ t->name = mem_pool_strdup(&fi_mem_pool, arg);
+ if (last_tag)
+ last_tag->next_tag = t;
+ else
+ first_tag = t;
+ last_tag = t;
+ read_next_command();
+ parse_mark();
+
+ /* from ... */
+ if (!skip_prefix(command_buf.buf, "from ", &from))
+ die("Expected from command, got %s", command_buf.buf);
+ s = lookup_branch(from);
+ if (s) {
+ if (is_null_oid(&s->oid))
+ die("Can't tag an empty branch.");
+ oidcpy(&oid, &s->oid);
+ type = OBJ_COMMIT;
+ } else if (*from == ':') {
+ struct object_entry *oe;
+ from_mark = parse_mark_ref_eol(from);
+ oe = find_mark(marks, from_mark);
+ type = oe->type;
+ oidcpy(&oid, &oe->idx.oid);
+ } else if (!get_oid(from, &oid)) {
+ struct object_entry *oe = find_object(&oid);
+ if (!oe) {
+ type = oid_object_info(the_repository, &oid, NULL);
+ if (type < 0)
+ die("Not a valid object: %s", from);
+ } else
+ type = oe->type;
+ } else
+ die("Invalid ref name or SHA1 expression: %s", from);
+ read_next_command();
+
+ /* original-oid ... */
+ parse_original_identifier();
+
+ /* tagger ... */
+ if (skip_prefix(command_buf.buf, "tagger ", &v)) {
+ tagger = parse_ident(v);
+ read_next_command();
+ } else
+ tagger = NULL;
+
+ /* tag payload/message */
+ parse_data(&msg, 0, NULL);
+
+ /* build the tag object */
+ strbuf_reset(&new_data);
+
+ strbuf_addf(&new_data,
+ "object %s\n"
+ "type %s\n"
+ "tag %s\n",
+ oid_to_hex(&oid), type_name(type), t->name);
+ if (tagger)
+ strbuf_addf(&new_data,
+ "tagger %s\n", tagger);
+ strbuf_addch(&new_data, '\n');
+ strbuf_addbuf(&new_data, &msg);
+ free(tagger);
+
+ if (store_object(OBJ_TAG, &new_data, NULL, &t->oid, next_mark))
+ t->pack_id = MAX_PACK_ID;
+ else
+ t->pack_id = pack_id;
+}
+
+static void parse_reset_branch(const char *arg)
+{
+ struct branch *b;
+ const char *tag_name;
+
+ b = lookup_branch(arg);
+ if (b) {
+ oidclr(&b->oid);
+ oidclr(&b->branch_tree.versions[0].oid);
+ oidclr(&b->branch_tree.versions[1].oid);
+ if (b->branch_tree.tree) {
+ release_tree_content_recursive(b->branch_tree.tree);
+ b->branch_tree.tree = NULL;
+ }
+ }
+ else
+ b = new_branch(arg);
+ read_next_command();
+ parse_from(b);
+ if (b->delete && skip_prefix(b->name, "refs/tags/", &tag_name)) {
+ /*
+ * Elsewhere, we call dump_branches() before dump_tags(),
+ * and dump_branches() will handle ref deletions first, so
+ * in order to make sure the deletion actually takes effect,
+ * we need to remove the tag from our list of tags to update.
+ *
+ * NEEDSWORK: replace list of tags with hashmap for faster
+ * deletion?
+ */
+ struct tag *t, *prev = NULL;
+ for (t = first_tag; t; t = t->next_tag) {
+ if (!strcmp(t->name, tag_name))
+ break;
+ prev = t;
+ }
+ if (t) {
+ if (prev)
+ prev->next_tag = t->next_tag;
+ else
+ first_tag = t->next_tag;
+ if (!t->next_tag)
+ last_tag = prev;
+ /* There is no mem_pool_free(t) function to call. */
+ }
+ }
+ if (command_buf.len > 0)
+ unread_command_buf = 1;
+}
+
+static void cat_blob_write(const char *buf, unsigned long size)
+{
+ if (write_in_full(cat_blob_fd, buf, size) < 0)
+ die_errno("Write to frontend failed");
+}
+
+static void cat_blob(struct object_entry *oe, struct object_id *oid)
+{
+ struct strbuf line = STRBUF_INIT;
+ unsigned long size;
+ enum object_type type = 0;
+ char *buf;
+
+ if (!oe || oe->pack_id == MAX_PACK_ID) {
+ buf = read_object_file(oid, &type, &size);
+ } else {
+ type = oe->type;
+ buf = gfi_unpack_entry(oe, &size);
+ }
+
+ /*
+ * Output based on batch_one_object() from cat-file.c.
+ */
+ if (type <= 0) {
+ strbuf_reset(&line);
+ strbuf_addf(&line, "%s missing\n", oid_to_hex(oid));
+ cat_blob_write(line.buf, line.len);
+ strbuf_release(&line);
+ free(buf);
+ return;
+ }
+ if (!buf)
+ die("Can't read object %s", oid_to_hex(oid));
+ if (type != OBJ_BLOB)
+ die("Object %s is a %s but a blob was expected.",
+ oid_to_hex(oid), type_name(type));
+ strbuf_reset(&line);
+ strbuf_addf(&line, "%s %s %"PRIuMAX"\n", oid_to_hex(oid),
+ type_name(type), (uintmax_t)size);
+ cat_blob_write(line.buf, line.len);
+ strbuf_release(&line);
+ cat_blob_write(buf, size);
+ cat_blob_write("\n", 1);
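+	/*
+	 * If the blob lives in the pack we are writing, keep its contents
+	 * around as the delta base candidate for the next stored blob.
+	 */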
+ if (oe && oe->pack_id == pack_id) {
+ last_blob.offset = oe->idx.offset;
+ strbuf_attach(&last_blob.data, buf, size, size);
+ last_blob.depth = oe->depth;
+ } else
+ free(buf);
+}
+
+static void parse_get_mark(const char *p)
+{
+ struct object_entry *oe;
+ char output[GIT_MAX_HEXSZ + 2];
+
+ /* get-mark SP <object> LF */
+ if (*p != ':')
+ die("Not a mark: %s", p);
+
+ oe = find_mark(marks, parse_mark_ref_eol(p));
+ if (!oe)
+ die("Unknown mark: %s", command_buf.buf);
+
+ xsnprintf(output, sizeof(output), "%s\n", oid_to_hex(&oe->idx.oid));
+ cat_blob_write(output, the_hash_algo->hexsz + 1);
+}
+
+static void parse_cat_blob(const char *p)
+{
+ struct object_entry *oe;
+ struct object_id oid;
+
+ /* cat-blob SP <object> LF */
+ if (*p == ':') {
+ oe = find_mark(marks, parse_mark_ref_eol(p));
+ if (!oe)
+ die("Unknown mark: %s", command_buf.buf);
+ oidcpy(&oid, &oe->idx.oid);
+ } else {
+ if (parse_mapped_oid_hex(p, &oid, &p))
+ die("Invalid dataref: %s", command_buf.buf);
+ if (*p)
+ die("Garbage after SHA1: %s", command_buf.buf);
+ oe = find_object(&oid);
+ }
+
+ cat_blob(oe, &oid);
+}
+
+static struct object_entry *dereference(struct object_entry *oe,
+ struct object_id *oid)
+{
+ unsigned long size;
+ char *buf = NULL;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
+ if (!oe) {
+ enum object_type type = oid_object_info(the_repository, oid,
+ NULL);
+ if (type < 0)
+ die("object not found: %s", oid_to_hex(oid));
+ /* cache it! */
+ oe = insert_object(oid);
+ oe->type = type;
+ oe->pack_id = MAX_PACK_ID;
+ oe->idx.offset = 1;
+ }
+ switch (oe->type) {
+ case OBJ_TREE: /* easy case. */
+ return oe;
+ case OBJ_COMMIT:
+ case OBJ_TAG:
+ break;
+ default:
+ die("Not a tree-ish: %s", command_buf.buf);
+ }
+
+ if (oe->pack_id != MAX_PACK_ID) { /* in a pack being written */
+ buf = gfi_unpack_entry(oe, &size);
+ } else {
+ enum object_type unused;
+ buf = read_object_file(oid, &unused, &size);
+ }
+ if (!buf)
+ die("Can't load object %s", oid_to_hex(oid));
+
+ /* Peel one layer. */
+ switch (oe->type) {
+ case OBJ_TAG:
+ if (size < hexsz + strlen("object ") ||
+ get_oid_hex(buf + strlen("object "), oid))
+ die("Invalid SHA1 in tag: %s", command_buf.buf);
+ break;
+ case OBJ_COMMIT:
+ if (size < hexsz + strlen("tree ") ||
+ get_oid_hex(buf + strlen("tree "), oid))
+ die("Invalid SHA1 in commit: %s", command_buf.buf);
+ }
+
+ free(buf);
+ return find_object(oid);
+}
+
+static void insert_mapped_mark(uintmax_t mark, void *object, void *cbp)
+{
+ struct object_id *fromoid = object;
+ struct object_id *tooid = find_mark(cbp, mark);
+ int ret;
+ khiter_t it;
+
+ it = kh_put_oid_map(sub_oid_map, *fromoid, &ret);
+ /* We've already seen this object. */
+ if (ret == 0)
+ return;
+ kh_value(sub_oid_map, it) = tooid;
+}
+
+static void build_mark_map_one(struct mark_set *from, struct mark_set *to)
+{
+ for_each_mark(from, 0, insert_mapped_mark, to);
+}
+
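+/*
+ * Pair each rewrite-submodules-from marks set with the matching
+ * rewrite-submodules-to set and build a single map from a submodule's
+ * old commit IDs to the rewritten ones.
+ */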
+static void build_mark_map(struct string_list *from, struct string_list *to)
+{
+ struct string_list_item *fromp, *top;
+
+ sub_oid_map = kh_init_oid_map();
+
+ for_each_string_list_item(fromp, from) {
+ top = string_list_lookup(to, fromp->string);
+ if (!fromp->util) {
+ die(_("Missing from marks for submodule '%s'"), fromp->string);
+ } else if (!top || !top->util) {
+ die(_("Missing to marks for submodule '%s'"), fromp->string);
+ }
+ build_mark_map_one(fromp->util, top->util);
+ }
+}
+
+static struct object_entry *parse_treeish_dataref(const char **p)
+{
+ struct object_id oid;
+ struct object_entry *e;
+
+ if (**p == ':') { /* <mark> */
+ e = find_mark(marks, parse_mark_ref_space(p));
+ if (!e)
+ die("Unknown mark: %s", command_buf.buf);
+ oidcpy(&oid, &e->idx.oid);
+ } else { /* <sha1> */
+ if (parse_mapped_oid_hex(*p, &oid, p))
+ die("Invalid dataref: %s", command_buf.buf);
+ e = find_object(&oid);
+ if (*(*p)++ != ' ')
+ die("Missing space after tree-ish: %s", command_buf.buf);
+ }
+
+ while (!e || e->type != OBJ_TREE)
+ e = dereference(e, &oid);
+ return e;
+}
+
+static void print_ls(int mode, const unsigned char *hash, const char *path)
+{
+ static struct strbuf line = STRBUF_INIT;
+
+ /* See show_tree(). */
+ const char *type =
+ S_ISGITLINK(mode) ? commit_type :
+ S_ISDIR(mode) ? tree_type :
+ blob_type;
+
+ if (!mode) {
+ /* missing SP path LF */
+ strbuf_reset(&line);
+ strbuf_addstr(&line, "missing ");
+ quote_c_style(path, &line, NULL, 0);
+ strbuf_addch(&line, '\n');
+ } else {
+ /* mode SP type SP object_name TAB path LF */
+ strbuf_reset(&line);
+ strbuf_addf(&line, "%06o %s %s\t",
+ mode & ~NO_DELTA, type, hash_to_hex(hash));
+ quote_c_style(path, &line, NULL, 0);
+ strbuf_addch(&line, '\n');
+ }
+ cat_blob_write(line.buf, line.len);
+}
+
+static void parse_ls(const char *p, struct branch *b)
+{
+ struct tree_entry *root = NULL;
+ struct tree_entry leaf = {NULL};
+
+ /* ls SP (<tree-ish> SP)? <path> */
+ if (*p == '"') {
+ if (!b)
+ die("Not in a commit: %s", command_buf.buf);
+ root = &b->branch_tree;
+ } else {
+ struct object_entry *e = parse_treeish_dataref(&p);
+ root = new_tree_entry();
+ oidcpy(&root->versions[1].oid, &e->idx.oid);
+ if (!is_null_oid(&root->versions[1].oid))
+ root->versions[1].mode = S_IFDIR;
+ load_tree(root);
+ }
+ if (*p == '"') {
+ static struct strbuf uq = STRBUF_INIT;
+ const char *endp;
+ strbuf_reset(&uq);
+ if (unquote_c_style(&uq, p, &endp))
+ die("Invalid path: %s", command_buf.buf);
+ if (*endp)
+ die("Garbage after path in: %s", command_buf.buf);
+ p = uq.buf;
+ }
+ tree_content_get(root, p, &leaf, 1);
+ /*
+ * A directory in preparation would have a sha1 of zero
+ * until it is saved. Save, for simplicity.
+ */
+ if (S_ISDIR(leaf.versions[1].mode))
+ store_tree(&leaf);
+
+ print_ls(leaf.versions[1].mode, leaf.versions[1].oid.hash, p);
+ if (leaf.tree)
+ release_tree_content_recursive(leaf.tree);
+ if (!b || root != &b->branch_tree)
+ release_tree_entry(root);
+}
+
+static void checkpoint(void)
+{
+ checkpoint_requested = 0;
+ if (object_count) {
+ cycle_packfile();
+ }
+ dump_branches();
+ dump_tags();
+ dump_marks();
+}
+
+static void parse_checkpoint(void)
+{
+ checkpoint_requested = 1;
+ skip_optional_lf();
+}
+
+static void parse_progress(void)
+{
+ fwrite(command_buf.buf, 1, command_buf.len, stdout);
+ fputc('\n', stdout);
+ fflush(stdout);
+ skip_optional_lf();
+}
+
+static void parse_alias(void)
+{
+ struct object_entry *e;
+ struct branch b;
+
+ skip_optional_lf();
+ read_next_command();
+
+ /* mark ... */
+ parse_mark();
+ if (!next_mark)
+ die(_("Expected 'mark' command, got %s"), command_buf.buf);
+
+ /* to ... */
+ memset(&b, 0, sizeof(b));
+ if (!parse_objectish_with_prefix(&b, "to "))
+ die(_("Expected 'to' command, got %s"), command_buf.buf);
+ e = find_object(&b.oid);
+ assert(e);
+ insert_mark(marks, next_mark, e);
+}
+
+static char *make_fast_import_path(const char *path)
+{
+ if (!relative_marks_paths || is_absolute_path(path))
+ return xstrdup(path);
+ return git_pathdup("info/fast-import/%s", path);
+}
+
+static void option_import_marks(const char *marks,
+ int from_stream, int ignore_missing)
+{
+ if (import_marks_file) {
+ if (from_stream)
+ die("Only one import-marks command allowed per stream");
+
+ /* read previous mark file */
+		if (!import_marks_file_from_stream)
+ read_marks();
+ }
+
+ import_marks_file = make_fast_import_path(marks);
+ import_marks_file_from_stream = from_stream;
+ import_marks_file_ignore_missing = ignore_missing;
+}
+
+static void option_date_format(const char *fmt)
+{
+ if (!strcmp(fmt, "raw"))
+ whenspec = WHENSPEC_RAW;
+ else if (!strcmp(fmt, "raw-permissive"))
+ whenspec = WHENSPEC_RAW_PERMISSIVE;
+ else if (!strcmp(fmt, "rfc2822"))
+ whenspec = WHENSPEC_RFC2822;
+ else if (!strcmp(fmt, "now"))
+ whenspec = WHENSPEC_NOW;
+ else
+ die("unknown --date-format argument %s", fmt);
+}
+
+static unsigned long ulong_arg(const char *option, const char *arg)
+{
+ char *endptr;
+ unsigned long rv = strtoul(arg, &endptr, 0);
+ if (strchr(arg, '-') || endptr == arg || *endptr)
+ die("%s: argument must be a non-negative integer", option);
+ return rv;
+}
+
+static void option_depth(const char *depth)
+{
+ max_depth = ulong_arg("--depth", depth);
+ if (max_depth > MAX_DEPTH)
+ die("--depth cannot exceed %u", MAX_DEPTH);
+}
+
+static void option_active_branches(const char *branches)
+{
+ max_active_branches = ulong_arg("--active-branches", branches);
+}
+
+static void option_export_marks(const char *marks)
+{
+ export_marks_file = make_fast_import_path(marks);
+}
+
+static void option_cat_blob_fd(const char *fd)
+{
+ unsigned long n = ulong_arg("--cat-blob-fd", fd);
+ if (n > (unsigned long) INT_MAX)
+ die("--cat-blob-fd cannot exceed %d", INT_MAX);
+ cat_blob_fd = (int) n;
+}
+
+static void option_export_pack_edges(const char *edges)
+{
+ if (pack_edges)
+ fclose(pack_edges);
+ pack_edges = xfopen(edges, "a");
+}
+
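+/*
+ * Parse a "name:filename" argument and load the named mark file into
+ * the given list, keyed by submodule name, for later mark rewriting.
+ */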
+static void option_rewrite_submodules(const char *arg, struct string_list *list)
+{
+ struct mark_set *ms;
+ FILE *fp;
+ char *s = xstrdup(arg);
+ char *f = strchr(s, ':');
+ if (!f)
+ die(_("Expected format name:filename for submodule rewrite option"));
+ *f = '\0';
+ f++;
+ ms = xcalloc(1, sizeof(*ms));
+ string_list_insert(list, s)->util = ms;
+
+ fp = fopen(f, "r");
+ if (!fp)
+ die_errno("cannot read '%s'", f);
+ read_mark_file(ms, fp, insert_oid_entry);
+ fclose(fp);
+}
+
+static int parse_one_option(const char *option)
+{
+ if (skip_prefix(option, "max-pack-size=", &option)) {
+ unsigned long v;
+ if (!git_parse_ulong(option, &v))
+ return 0;
+ if (v < 8192) {
+ warning("max-pack-size is now in bytes, assuming --max-pack-size=%lum", v);
+ v *= 1024 * 1024;
+ } else if (v < 1024 * 1024) {
+ warning("minimum max-pack-size is 1 MiB");
+ v = 1024 * 1024;
+ }
+ max_packsize = v;
+ } else if (skip_prefix(option, "big-file-threshold=", &option)) {
+ unsigned long v;
+ if (!git_parse_ulong(option, &v))
+ return 0;
+ big_file_threshold = v;
+ } else if (skip_prefix(option, "depth=", &option)) {
+ option_depth(option);
+ } else if (skip_prefix(option, "active-branches=", &option)) {
+ option_active_branches(option);
+ } else if (skip_prefix(option, "export-pack-edges=", &option)) {
+ option_export_pack_edges(option);
+ } else if (!strcmp(option, "quiet")) {
+ show_stats = 0;
+ } else if (!strcmp(option, "stats")) {
+ show_stats = 1;
+ } else if (!strcmp(option, "allow-unsafe-features")) {
+ ; /* already handled during early option parsing */
+ } else {
+ return 0;
+ }
+
+ return 1;
+}
+
+static void check_unsafe_feature(const char *feature, int from_stream)
+{
+ if (from_stream && !allow_unsafe_features)
+ die(_("feature '%s' forbidden in input without --allow-unsafe-features"),
+ feature);
+}
+
+static int parse_one_feature(const char *feature, int from_stream)
+{
+ const char *arg;
+
+ if (skip_prefix(feature, "date-format=", &arg)) {
+ option_date_format(arg);
+ } else if (skip_prefix(feature, "import-marks=", &arg)) {
+ check_unsafe_feature("import-marks", from_stream);
+ option_import_marks(arg, from_stream, 0);
+ } else if (skip_prefix(feature, "import-marks-if-exists=", &arg)) {
+ check_unsafe_feature("import-marks-if-exists", from_stream);
+ option_import_marks(arg, from_stream, 1);
+ } else if (skip_prefix(feature, "export-marks=", &arg)) {
+ check_unsafe_feature(feature, from_stream);
+ option_export_marks(arg);
+ } else if (!strcmp(feature, "alias")) {
+ ; /* Don't die - this feature is supported */
+ } else if (skip_prefix(feature, "rewrite-submodules-to=", &arg)) {
+ option_rewrite_submodules(arg, &sub_marks_to);
+ } else if (skip_prefix(feature, "rewrite-submodules-from=", &arg)) {
+ option_rewrite_submodules(arg, &sub_marks_from);
+ } else if (skip_prefix(feature, "rewrite-submodules-from=", &arg)) {
+ } else if (!strcmp(feature, "get-mark")) {
+ ; /* Don't die - this feature is supported */
+ } else if (!strcmp(feature, "cat-blob")) {
+ ; /* Don't die - this feature is supported */
+ } else if (!strcmp(feature, "relative-marks")) {
+ relative_marks_paths = 1;
+ } else if (!strcmp(feature, "no-relative-marks")) {
+ relative_marks_paths = 0;
+ } else if (!strcmp(feature, "done")) {
+ require_explicit_termination = 1;
+ } else if (!strcmp(feature, "force")) {
+ force_update = 1;
+ } else if (!strcmp(feature, "notes") || !strcmp(feature, "ls")) {
+ ; /* do nothing; we have the feature */
+ } else {
+ return 0;
+ }
+
+ return 1;
+}
+
+static void parse_feature(const char *feature)
+{
+ if (seen_data_command)
+ die("Got feature command '%s' after data command", feature);
+
+ if (parse_one_feature(feature, 1))
+ return;
+
+ die("This version of fast-import does not support feature %s.", feature);
+}
+
+static void parse_option(const char *option)
+{
+ if (seen_data_command)
+ die("Got option command '%s' after data command", option);
+
+ if (parse_one_option(option))
+ return;
+
+ die("This version of fast-import does not support option: %s", option);
+}
+
+static void git_pack_config(void)
+{
+ int indexversion_value;
+ int limit;
+ unsigned long packsizelimit_value;
+
+ if (!git_config_get_ulong("pack.depth", &max_depth)) {
+ if (max_depth > MAX_DEPTH)
+ max_depth = MAX_DEPTH;
+ }
+ if (!git_config_get_int("pack.indexversion", &indexversion_value)) {
+ pack_idx_opts.version = indexversion_value;
+ if (pack_idx_opts.version > 2)
+ git_die_config("pack.indexversion",
+ "bad pack.indexversion=%"PRIu32, pack_idx_opts.version);
+ }
+ if (!git_config_get_ulong("pack.packsizelimit", &packsizelimit_value))
+ max_packsize = packsizelimit_value;
+
+ if (!git_config_get_int("fastimport.unpacklimit", &limit))
+ unpack_limit = limit;
+ else if (!git_config_get_int("transfer.unpacklimit", &limit))
+ unpack_limit = limit;
+
+ git_config(git_default_config, NULL);
+}
+
+static const char fast_import_usage[] =
+"git fast-import [--date-format=<f>] [--max-pack-size=<n>] [--big-file-threshold=<n>] [--depth=<n>] [--active-branches=<n>] [--export-marks=<marks.file>]";
+
+static void parse_argv(void)
+{
+ unsigned int i;
+
+ for (i = 1; i < global_argc; i++) {
+ const char *a = global_argv[i];
+
+ if (*a != '-' || !strcmp(a, "--"))
+ break;
+
+ if (!skip_prefix(a, "--", &a))
+ die("unknown option %s", a);
+
+ if (parse_one_option(a))
+ continue;
+
+ if (parse_one_feature(a, 0))
+ continue;
+
+ if (skip_prefix(a, "cat-blob-fd=", &a)) {
+ option_cat_blob_fd(a);
+ continue;
+ }
+
+ die("unknown option --%s", a);
+ }
+ if (i != global_argc)
+ usage(fast_import_usage);
+
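+	/* any later 'feature' or 'option' command in the stream is rejected */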
+ seen_data_command = 1;
+ if (import_marks_file)
+ read_marks();
+ build_mark_map(&sub_marks_from, &sub_marks_to);
+}
+
+int cmd_fast_import(int argc, const char **argv, const char *prefix)
+{
+ unsigned int i;
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(fast_import_usage);
+
+ reset_pack_idx_option(&pack_idx_opts);
+ git_pack_config();
+
+ alloc_objects(object_entry_alloc);
+ strbuf_init(&command_buf, 0);
+ atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
+ branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
+ avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
+ marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
+
+ hashmap_init(&object_table, object_entry_hashcmp, NULL, 0);
+
+ /*
+ * We don't parse most options until after we've seen the set of
+ * "feature" lines at the start of the stream (which allows the command
+ * line to override stream data). But we must do an early parse of any
+ * command-line options that impact how we interpret the feature lines.
+ */
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (*arg != '-' || !strcmp(arg, "--"))
+ break;
+ if (!strcmp(arg, "--allow-unsafe-features"))
+ allow_unsafe_features = 1;
+ }
+
+ global_argc = argc;
+ global_argv = argv;
+
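+	/* preallocate the recent-command ring and chain it into a free list */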
+ rc_free = mem_pool_alloc(&fi_mem_pool, cmd_save * sizeof(*rc_free));
+ for (i = 0; i < (cmd_save - 1); i++)
+ rc_free[i].next = &rc_free[i + 1];
+ rc_free[cmd_save - 1].next = NULL;
+
+ start_packfile();
+ set_die_routine(die_nicely);
+ set_checkpoint_signal();
+ while (read_next_command() != EOF) {
+ const char *v;
+ if (!strcmp("blob", command_buf.buf))
+ parse_new_blob();
+ else if (skip_prefix(command_buf.buf, "commit ", &v))
+ parse_new_commit(v);
+ else if (skip_prefix(command_buf.buf, "tag ", &v))
+ parse_new_tag(v);
+ else if (skip_prefix(command_buf.buf, "reset ", &v))
+ parse_reset_branch(v);
+ else if (skip_prefix(command_buf.buf, "ls ", &v))
+ parse_ls(v, NULL);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else if (skip_prefix(command_buf.buf, "get-mark ", &v))
+ parse_get_mark(v);
+ else if (!strcmp("checkpoint", command_buf.buf))
+ parse_checkpoint();
+ else if (!strcmp("done", command_buf.buf))
+ break;
+ else if (!strcmp("alias", command_buf.buf))
+ parse_alias();
+ else if (starts_with(command_buf.buf, "progress "))
+ parse_progress();
+ else if (skip_prefix(command_buf.buf, "feature ", &v))
+ parse_feature(v);
+ else if (skip_prefix(command_buf.buf, "option git ", &v))
+ parse_option(v);
+ else if (starts_with(command_buf.buf, "option "))
+ /* ignore non-git options */;
+ else
+ die("Unsupported command: %s", command_buf.buf);
+
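+		/* checkpoint requests from the stream or a signal are honoured between commands */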
+ if (checkpoint_requested)
+ checkpoint();
+ }
+
+ /* argv hasn't been parsed yet, do so */
+ if (!seen_data_command)
+ parse_argv();
+
+ if (require_explicit_termination && feof(stdin))
+ die("stream ends early");
+
+ end_packfile();
+
+ dump_branches();
+ dump_tags();
+ unkeep_all_packs();
+ dump_marks();
+
+ if (pack_edges)
+ fclose(pack_edges);
+
+ if (show_stats) {
+ uintmax_t total_count = 0, duplicate_count = 0;
+ for (i = 0; i < ARRAY_SIZE(object_count_by_type); i++)
+ total_count += object_count_by_type[i];
+ for (i = 0; i < ARRAY_SIZE(duplicate_count_by_type); i++)
+ duplicate_count += duplicate_count_by_type[i];
+
+ fprintf(stderr, "%s statistics:\n", argv[0]);
+ fprintf(stderr, "---------------------------------------------------------------------\n");
+ fprintf(stderr, "Alloc'd objects: %10" PRIuMAX "\n", alloc_count);
+ fprintf(stderr, "Total objects: %10" PRIuMAX " (%10" PRIuMAX " duplicates )\n", total_count, duplicate_count);
+ fprintf(stderr, " blobs : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_BLOB], duplicate_count_by_type[OBJ_BLOB], delta_count_by_type[OBJ_BLOB], delta_count_attempts_by_type[OBJ_BLOB]);
+ fprintf(stderr, " trees : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TREE], duplicate_count_by_type[OBJ_TREE], delta_count_by_type[OBJ_TREE], delta_count_attempts_by_type[OBJ_TREE]);
+ fprintf(stderr, " commits: %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_COMMIT], duplicate_count_by_type[OBJ_COMMIT], delta_count_by_type[OBJ_COMMIT], delta_count_attempts_by_type[OBJ_COMMIT]);
+ fprintf(stderr, " tags : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TAG], duplicate_count_by_type[OBJ_TAG], delta_count_by_type[OBJ_TAG], delta_count_attempts_by_type[OBJ_TAG]);
+ fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count);
+ fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
+ fprintf(stderr, " atoms: %10u\n", atom_cnt);
+ fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (tree_entry_allocd + fi_mem_pool.pool_alloc + alloc_count*sizeof(struct object_entry))/1024);
+ fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)((tree_entry_allocd + fi_mem_pool.pool_alloc) /1024));
+ fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
+ fprintf(stderr, "---------------------------------------------------------------------\n");
+ pack_report();
+ fprintf(stderr, "---------------------------------------------------------------------\n");
+ fprintf(stderr, "\n");
+ }
+
+ return failure ? 1 : 0;
+}
diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c
index bbb5c96167..58b7c1fbdc 100644
--- a/builtin/fetch-pack.c
+++ b/builtin/fetch-pack.c
@@ -153,10 +153,6 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
args.from_promisor = 1;
continue;
}
- if (!strcmp("--no-dependents", arg)) {
- args.no_dependents = 1;
- continue;
- }
if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
parse_list_objects_filter(&args.filter_options, arg);
continue;
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 2eb8d6a5a5..a6d3268661 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -80,6 +80,7 @@ static struct list_objects_filter_options filter_options;
static struct string_list server_options = STRING_LIST_INIT_DUP;
static struct string_list negotiation_tip = STRING_LIST_INIT_NODUP;
static int fetch_write_commit_graph = -1;
+static int stdin_refspecs = 0;
static int git_fetch_config(const char *k, const char *v, void *cb)
{
@@ -205,6 +206,8 @@ static struct option builtin_fetch_options[] = {
N_("check for forced-updates on all updated branches")),
OPT_BOOL(0, "write-commit-graph", &fetch_write_commit_graph,
N_("write the commit-graph after fetching")),
+ OPT_BOOL(0, "stdin", &stdin_refspecs,
+ N_("accept refspecs from stdin")),
OPT_END()
};
@@ -442,6 +445,7 @@ static struct ref *get_ref_map(struct remote *remote,
struct ref *orefs = NULL, **oref_tail = &orefs;
struct hashmap existing_refs;
+ int existing_refs_populated = 0;
if (rs->nr) {
struct refspec *fetch_refspec;
@@ -535,15 +539,18 @@ static struct ref *get_ref_map(struct remote *remote,
ref_map = ref_remove_duplicates(ref_map);
- refname_hash_init(&existing_refs);
- for_each_ref(add_one_refname, &existing_refs);
-
for (rm = ref_map; rm; rm = rm->next) {
if (rm->peer_ref) {
const char *refname = rm->peer_ref->name;
struct refname_hash_entry *peer_item;
unsigned int hash = strhash(refname);
+ if (!existing_refs_populated) {
+ refname_hash_init(&existing_refs);
+ for_each_ref(add_one_refname, &existing_refs);
+ existing_refs_populated = 1;
+ }
+
peer_item = hashmap_get_entry_from_hash(&existing_refs,
hash, refname,
struct refname_hash_entry, ent);
@@ -553,7 +560,8 @@ static struct ref *get_ref_map(struct remote *remote,
}
}
}
- hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
+ if (existing_refs_populated)
+ hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
return ref_map;
}
@@ -648,7 +656,7 @@ static void prepare_format_display(struct ref *ref_map)
struct ref *rm;
const char *format = "full";
- git_config_get_string_const("fetch.output", &format);
+ git_config_get_string_tmp("fetch.output", &format);
if (!strcasecmp(format, "full"))
compact_format = 0;
else if (!strcasecmp(format, "compact"))
@@ -1015,11 +1023,17 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
rc |= update_local_ref(ref, what, rm, &note,
summary_width);
free(ref);
- } else
+ } else if (write_fetch_head || dry_run) {
+ /*
+ * Display fetches written to FETCH_HEAD (or
+ * would be written to FETCH_HEAD, if --dry-run
+ * is set).
+ */
format_display(&note, '*',
*kind ? kind : "branch", NULL,
*what ? what : "HEAD",
"FETCH_HEAD", summary_width);
+ }
if (note.len) {
if (verbosity >= 0 && !shown_url) {
fprintf(stderr, _("From %.*s\n"),
@@ -1680,7 +1694,8 @@ static inline void fetch_one_setup_partial(struct remote *remote)
return;
}
-static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok)
+static int fetch_one(struct remote *remote, int argc, const char **argv,
+ int prune_tags_ok, int use_stdin_refspecs)
{
struct refspec rs = REFSPEC_INIT_FETCH;
int i;
@@ -1737,6 +1752,13 @@ static int fetch_one(struct remote *remote, int argc, const char **argv, int pru
}
}
+ if (use_stdin_refspecs) {
+ struct strbuf line = STRBUF_INIT;
+ while (strbuf_getline_lf(&line, stdin) != EOF)
+ refspec_append(&rs, line.buf);
+ strbuf_release(&line);
+ }
+
if (server_options.nr)
gtransport->server_options = &server_options;
@@ -1771,12 +1793,18 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
free(anon);
}
- fetch_config_from_gitmodules(&submodule_fetch_jobs_config,
- &recurse_submodules);
git_config(git_fetch_config, NULL);
argc = parse_options(argc, argv, prefix,
builtin_fetch_options, builtin_fetch_usage, 0);
+ if (recurse_submodules != RECURSE_SUBMODULES_OFF) {
+ int *sfjc = submodule_fetch_jobs_config == -1
+ ? &submodule_fetch_jobs_config : NULL;
+ int *rs = recurse_submodules == RECURSE_SUBMODULES_DEFAULT
+ ? &recurse_submodules : NULL;
+
+ fetch_config_from_gitmodules(sfjc, rs);
+ }
if (deepen_relative) {
if (deepen_relative < 0)
@@ -1837,7 +1865,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
if (remote) {
if (filter_options.choice || has_promisor_remote())
fetch_one_setup_partial(remote);
- result = fetch_one(remote, argc, argv, prune_tags_ok);
+ result = fetch_one(remote, argc, argv, prune_tags_ok, stdin_refspecs);
} else {
int max_children = max_jobs;
@@ -1845,6 +1873,10 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
die(_("--filter can only be used with the remote "
"configured in extensions.partialclone"));
+ if (stdin_refspecs)
+ die(_("--stdin can only be used when fetching "
+ "from one remote"));
+
if (max_children < 0)
max_children = fetch_parallel_config;
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index f865666db9..9721bf1ffe 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1798,9 +1798,22 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
if (HAVE_THREADS && !nr_threads) {
nr_threads = online_cpus();
- /* An experiment showed that more threads does not mean faster */
- if (nr_threads > 3)
- nr_threads = 3;
+ /*
+ * Experiments show that going above 20 threads doesn't help,
+ * no matter how many cores you have. Below that, we tend to
+ * max at half the number of online_cpus(), presumably because
+ * half of those are hyperthreads rather than full cores. We'll
+ * never reduce the level below "3", though, to match a
+ * historical value that nobody complained about.
+ */
+ if (nr_threads < 4)
+ ; /* too few cores to consider capping */
+ else if (nr_threads < 6)
+ nr_threads = 3; /* historic cap */
+ else if (nr_threads < 40)
+ nr_threads /= 2;
+ else
+ nr_threads = 20; /* hard cap */
}
curr_pack = open_pack_file(pack_name);
diff --git a/builtin/merge.c b/builtin/merge.c
index 74829a838e..b9a89ba858 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -1348,7 +1348,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
else
die(_("You have not concluded your merge (MERGE_HEAD exists)."));
}
- if (file_exists(git_path_cherry_pick_head(the_repository))) {
+ if (ref_exists("CHERRY_PICK_HEAD")) {
if (advice_resolve_conflict)
die(_("You have not concluded your cherry-pick (CHERRY_PICK_HEAD exists).\n"
"Please, commit your changes before you merge."));
diff --git a/builtin/rebase.c b/builtin/rebase.c
index dadb52fa92..c4ff2039ef 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -92,6 +92,8 @@ struct rebase_options {
int autosquash;
char *gpg_sign_opt;
int autostash;
+ int committer_date_is_author_date;
+ int ignore_date;
char *cmd;
int allow_empty_message;
int rebase_merges, rebase_cousins;
@@ -130,8 +132,12 @@ static struct replay_opts get_replay_opts(const struct rebase_options *opts)
replay.quiet = !(opts->flags & REBASE_NO_QUIET);
replay.verbose = opts->flags & REBASE_VERBOSE;
replay.reschedule_failed_exec = opts->reschedule_failed_exec;
+ replay.committer_date_is_author_date =
+ opts->committer_date_is_author_date;
+ replay.ignore_date = opts->ignore_date;
replay.gpg_sign = xstrdup_or_null(opts->gpg_sign_opt);
replay.strategy = opts->strategy;
+
if (opts->strategy_opts)
parse_strategy_opts(&replay, opts->strategy_opts);
@@ -1289,6 +1295,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
struct strbuf revisions = STRBUF_INIT;
struct strbuf buf = STRBUF_INIT;
struct object_id merge_base;
+ int ignore_whitespace = 0;
enum action action = ACTION_NONE;
const char *gpg_sign = NULL;
struct string_list exec = STRING_LIST_INIT_NODUP;
@@ -1318,16 +1325,17 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
PARSE_OPT_NOARG, NULL, REBASE_DIFFSTAT },
OPT_BOOL(0, "signoff", &options.signoff,
N_("add a Signed-off-by: line to each commit")),
- OPT_PASSTHRU_ARGV(0, "ignore-whitespace", &options.git_am_opts,
- NULL, N_("passed to 'git am'"),
- PARSE_OPT_NOARG),
- OPT_PASSTHRU_ARGV(0, "committer-date-is-author-date",
- &options.git_am_opts, NULL,
- N_("passed to 'git am'"), PARSE_OPT_NOARG),
- OPT_PASSTHRU_ARGV(0, "ignore-date", &options.git_am_opts, NULL,
- N_("passed to 'git am'"), PARSE_OPT_NOARG),
+ OPT_BOOL(0, "committer-date-is-author-date",
+ &options.committer_date_is_author_date,
+ N_("make committer date match author date")),
+ OPT_BOOL(0, "reset-author-date", &options.ignore_date,
+ N_("ignore author date and use current date")),
+ OPT_HIDDEN_BOOL(0, "ignore-date", &options.ignore_date,
+ N_("synonym of --reset-author-date")),
OPT_PASSTHRU_ARGV('C', NULL, &options.git_am_opts, N_("n"),
N_("passed to 'git apply'"), 0),
+ OPT_BOOL(0, "ignore-whitespace", &ignore_whitespace,
+ N_("ignore changes in whitespace")),
OPT_PASSTHRU_ARGV(0, "whitespace", &options.git_am_opts,
N_("action"), N_("passed to 'git apply'"), 0),
OPT_BIT('f', "force-rebase", &options.flags,
@@ -1624,12 +1632,12 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
options.autosquash) {
allow_preemptive_ff = 0;
}
+ if (options.committer_date_is_author_date || options.ignore_date)
+ options.flags |= REBASE_FORCE;
for (i = 0; i < options.git_am_opts.nr; i++) {
const char *option = options.git_am_opts.v[i], *p;
- if (!strcmp(option, "--committer-date-is-author-date") ||
- !strcmp(option, "--ignore-date") ||
- !strcmp(option, "--whitespace=fix") ||
+ if (!strcmp(option, "--whitespace=fix") ||
!strcmp(option, "--whitespace=strip"))
allow_preemptive_ff = 0;
else if (skip_prefix(option, "-C", &p)) {
@@ -1682,6 +1690,23 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
imply_merge(&options, "--rebase-merges");
}
+ if (options.type == REBASE_APPLY) {
+ if (ignore_whitespace)
+ strvec_push(&options.git_am_opts,
+ "--ignore-whitespace");
+ if (options.committer_date_is_author_date)
+ strvec_push(&options.git_am_opts,
+ "--committer-date-is-author-date");
+ if (options.ignore_date)
+ strvec_push(&options.git_am_opts, "--ignore-date");
+ } else {
+ /* REBASE_MERGE and PRESERVE_MERGES */
+ if (ignore_whitespace) {
+ string_list_append(&strategy_options,
+ "ignore-space-change");
+ }
+ }
+
if (strategy_options.nr) {
int i;
diff --git a/builtin/remote.c b/builtin/remote.c
index c8240e9fcd..542f56e387 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -478,6 +478,7 @@ static int get_head_names(const struct ref *remote_refs, struct ref_states *stat
struct ref *fetch_map = NULL, **fetch_map_tail = &fetch_map;
struct refspec_item refspec;
+ memset(&refspec, 0, sizeof(refspec));
refspec.force = 0;
refspec.pattern = 1;
refspec.src = refspec.dst = "refs/heads/*";
diff --git a/builtin/repack.c b/builtin/repack.c
index 04c5ceaf7e..01e7767c79 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -133,7 +133,11 @@ static void get_non_kept_pack_filenames(struct string_list *fname_list,
static void remove_redundant_pack(const char *dir_name, const char *base_name)
{
struct strbuf buf = STRBUF_INIT;
- strbuf_addf(&buf, "%s/%s.pack", dir_name, base_name);
+ struct multi_pack_index *m = get_local_multi_pack_index(the_repository);
+ strbuf_addf(&buf, "%s.pack", base_name);
+ if (m && midx_contains_pack(m, buf.buf))
+ clear_midx_file(the_repository);
+ strbuf_insertf(&buf, 0, "%s/", dir_name);
unlink_pack_path(buf.buf, 1);
strbuf_release(&buf);
}
@@ -286,7 +290,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
int keep_unreachable = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
int no_update_server_info = 0;
- int midx_cleared = 0;
struct pack_objects_args po_args = {NULL};
struct option builtin_repack_options[] = {
@@ -439,11 +442,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
char *fname, *fname_old;
- if (!midx_cleared) {
- clear_midx_file(the_repository);
- midx_cleared = 1;
- }
-
fname = mkpathdup("%s/pack-%s%s", packdir,
item->string, exts[ext].name);
if (!file_exists(fname)) {
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index df135abbf1..b9df79befc 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -612,7 +612,6 @@ struct init_cb {
const char *prefix;
unsigned int flags;
};
-
#define INIT_CB_INIT { NULL, 0 }
static void init_submodule(const char *path, const char *prefix,
@@ -742,7 +741,6 @@ struct status_cb {
const char *prefix;
unsigned int flags;
};
-
#define STATUS_CB_INIT { NULL, 0 }
static void print_status(unsigned int flags, char state, const char *path,
@@ -929,11 +927,438 @@ static int module_name(int argc, const char **argv, const char *prefix)
return 0;
}
+struct module_cb {
+ unsigned int mod_src;
+ unsigned int mod_dst;
+ struct object_id oid_src;
+ struct object_id oid_dst;
+ char status;
+ const char *sm_path;
+};
+#define MODULE_CB_INIT { 0, 0, NULL, NULL, '\0', NULL }
+
+struct module_cb_list {
+ struct module_cb **entries;
+ int alloc, nr;
+};
+#define MODULE_CB_LIST_INIT { NULL, 0, 0 }
+
+struct summary_cb {
+ int argc;
+ const char **argv;
+ const char *prefix;
+ unsigned int cached: 1;
+ unsigned int for_status: 1;
+ unsigned int files: 1;
+ int summary_limit;
+};
+#define SUMMARY_CB_INIT { 0, NULL, NULL, 0, 0, 0, 0 }
+
+enum diff_cmd {
+ DIFF_INDEX,
+ DIFF_FILES
+};
+
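+/*
+ * Resolve <committish>^0 inside the submodule and return its abbreviated
+ * hash, or NULL if the submodule does not contain that commit.
+ */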
+static char* verify_submodule_committish(const char *sm_path,
+ const char *committish)
+{
+ struct child_process cp_rev_parse = CHILD_PROCESS_INIT;
+ struct strbuf result = STRBUF_INIT;
+
+ cp_rev_parse.git_cmd = 1;
+ cp_rev_parse.dir = sm_path;
+ prepare_submodule_repo_env(&cp_rev_parse.env_array);
+ strvec_pushl(&cp_rev_parse.args, "rev-parse", "-q", "--short", NULL);
+ strvec_pushf(&cp_rev_parse.args, "%s^0", committish);
+ strvec_push(&cp_rev_parse.args, "--");
+
+ if (capture_command(&cp_rev_parse, &result, 0))
+ return NULL;
+
+ strbuf_trim_trailing_newline(&result);
+ return strbuf_detach(&result, NULL);
+}
+
+static void print_submodule_summary(struct summary_cb *info, char* errmsg,
+ int total_commits, const char *displaypath,
+ const char *src_abbrev, const char *dst_abbrev,
+ int missing_src, int missing_dst,
+ struct module_cb *p)
+{
+ if (p->status == 'T') {
+ if (S_ISGITLINK(p->mod_dst))
+ printf(_("* %s %s(blob)->%s(submodule)"),
+ displaypath, src_abbrev, dst_abbrev);
+ else
+ printf(_("* %s %s(submodule)->%s(blob)"),
+ displaypath, src_abbrev, dst_abbrev);
+ } else {
+ printf("* %s %s...%s",
+ displaypath, src_abbrev, dst_abbrev);
+ }
+
+ if (total_commits < 0)
+ printf(":\n");
+ else
+ printf(" (%d):\n", total_commits);
+
+ if (errmsg) {
+ printf(_("%s"), errmsg);
+ } else if (total_commits > 0) {
+ struct child_process cp_log = CHILD_PROCESS_INIT;
+
+ cp_log.git_cmd = 1;
+ cp_log.dir = p->sm_path;
+ prepare_submodule_repo_env(&cp_log.env_array);
+ strvec_pushl(&cp_log.args, "log", NULL);
+
+ if (S_ISGITLINK(p->mod_src) && S_ISGITLINK(p->mod_dst)) {
+ if (info->summary_limit > 0)
+ strvec_pushf(&cp_log.args, "-%d",
+ info->summary_limit);
+
+ strvec_pushl(&cp_log.args, "--pretty= %m %s",
+ "--first-parent", NULL);
+ strvec_pushf(&cp_log.args, "%s...%s",
+ src_abbrev, dst_abbrev);
+ } else if (S_ISGITLINK(p->mod_dst)) {
+ strvec_pushl(&cp_log.args, "--pretty= > %s",
+ "-1", dst_abbrev, NULL);
+ } else {
+ strvec_pushl(&cp_log.args, "--pretty= < %s",
+ "-1", src_abbrev, NULL);
+ }
+ run_command(&cp_log);
+ }
+ printf("\n");
+}
+
+static void generate_submodule_summary(struct summary_cb *info,
+ struct module_cb *p)
+{
+ char *displaypath, *src_abbrev, *dst_abbrev;
+ int missing_src = 0, missing_dst = 0;
+ char *errmsg = NULL;
+ int total_commits = -1;
+
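+	/*
+	 * When not limited to the index, a null oid_dst means the diff saw
+	 * the working tree: read the submodule's HEAD for gitlinks, or hash
+	 * the file contents for a type change to a blob or symlink.
+	 */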
+ if (!info->cached && oideq(&p->oid_dst, &null_oid)) {
+ if (S_ISGITLINK(p->mod_dst)) {
+ struct ref_store *refs = get_submodule_ref_store(p->sm_path);
+ if (refs)
+ refs_head_ref(refs, handle_submodule_head_ref, &p->oid_dst);
+ } else if (S_ISLNK(p->mod_dst) || S_ISREG(p->mod_dst)) {
+ struct stat st;
+ int fd = open(p->sm_path, O_RDONLY);
+
+ if (fd < 0 || fstat(fd, &st) < 0 ||
+ index_fd(&the_index, &p->oid_dst, fd, &st, OBJ_BLOB,
+ p->sm_path, 0))
+ error(_("couldn't hash object from '%s'"), p->sm_path);
+ } else {
+ /* for a submodule removal (mode:0000000), don't warn */
+ if (p->mod_dst)
+ warning(_("unexpected mode %d\n"), p->mod_dst);
+ }
+ }
+
+ if (S_ISGITLINK(p->mod_src)) {
+ src_abbrev = verify_submodule_committish(p->sm_path,
+ oid_to_hex(&p->oid_src));
+ if (!src_abbrev) {
+ missing_src = 1;
+ /*
+ * As `rev-parse` failed, we fall back to getting
+ * the abbreviated hash using oid_src. We do
+ * this as we might still need the abbreviated
+ * hash in cases like a submodule type change, etc.
+ */
+ src_abbrev = xstrndup(oid_to_hex(&p->oid_src), 7);
+ }
+ } else {
+ /*
+ * The source does not point to a submodule.
+ * So, we fall back to getting the abbreviation using
+ * oid_src as we might still need the abbreviated
+ * hash in cases like submodule add, etc.
+ */
+ src_abbrev = xstrndup(oid_to_hex(&p->oid_src), 7);
+ }
+
+ if (S_ISGITLINK(p->mod_dst)) {
+ dst_abbrev = verify_submodule_committish(p->sm_path,
+ oid_to_hex(&p->oid_dst));
+ if (!dst_abbrev) {
+ missing_dst = 1;
+ /*
+ * As `rev-parse` failed, we fall back to getting
+ * the abbreviated hash using oid_dst. We do
+ * this as we might still need the abbreviated
+ * hash in cases like a submodule type change, etc.
+ */
+ dst_abbrev = xstrndup(oid_to_hex(&p->oid_dst), 7);
+ }
+ } else {
+ /*
+ * The destination does not point to a submodule.
+ * So, we fall back to getting the abbreviation using
+ * oid_dst as we might still need the abbreviated
+ * hash in cases like a submodule removal, etc.
+ */
+ dst_abbrev = xstrndup(oid_to_hex(&p->oid_dst), 7);
+ }
+
+ displaypath = get_submodule_displaypath(p->sm_path, info->prefix);
+
+ if (!missing_src && !missing_dst) {
+ struct child_process cp_rev_list = CHILD_PROCESS_INIT;
+ struct strbuf sb_rev_list = STRBUF_INIT;
+
+ strvec_pushl(&cp_rev_list.args, "rev-list",
+ "--first-parent", "--count", NULL);
+ if (S_ISGITLINK(p->mod_src) && S_ISGITLINK(p->mod_dst))
+ strvec_pushf(&cp_rev_list.args, "%s...%s",
+ src_abbrev, dst_abbrev);
+ else
+ strvec_push(&cp_rev_list.args, S_ISGITLINK(p->mod_src) ?
+ src_abbrev : dst_abbrev);
+ strvec_push(&cp_rev_list.args, "--");
+
+ cp_rev_list.git_cmd = 1;
+ cp_rev_list.dir = p->sm_path;
+ prepare_submodule_repo_env(&cp_rev_list.env_array);
+
+ if (!capture_command(&cp_rev_list, &sb_rev_list, 0))
+ total_commits = atoi(sb_rev_list.buf);
+
+ strbuf_release(&sb_rev_list);
+ } else {
+ /*
+ * Don't give an error message for a modification whose dst is
+ * not a submodule, i.e. it was deleted or changed to a blob.
+ */
+ if (S_ISGITLINK(p->mod_dst)) {
+ struct strbuf errmsg_str = STRBUF_INIT;
+ if (missing_src && missing_dst) {
+ strbuf_addf(&errmsg_str, " Warn: %s doesn't contain commits %s and %s\n",
+ displaypath, oid_to_hex(&p->oid_src),
+ oid_to_hex(&p->oid_dst));
+ } else {
+ strbuf_addf(&errmsg_str, " Warn: %s doesn't contain commit %s\n",
+ displaypath, missing_src ?
+ oid_to_hex(&p->oid_src) :
+ oid_to_hex(&p->oid_dst));
+ }
+ errmsg = strbuf_detach(&errmsg_str, NULL);
+ }
+ }
+
+ print_submodule_summary(info, errmsg, total_commits,
+ displaypath, src_abbrev,
+ dst_abbrev, missing_src,
+ missing_dst, p);
+
+ free(displaypath);
+ free(src_abbrev);
+ free(dst_abbrev);
+}
+
+static void prepare_submodule_summary(struct summary_cb *info,
+ struct module_cb_list *list)
+{
+ int i;
+ for (i = 0; i < list->nr; i++) {
+ const struct submodule *sub;
+ struct module_cb *p = list->entries[i];
+ struct strbuf sm_gitdir = STRBUF_INIT;
+
+ if (p->status == 'D' || p->status == 'T') {
+ generate_submodule_summary(info, p);
+ continue;
+ }
+
+ if (info->for_status && p->status != 'A' &&
+ (sub = submodule_from_path(the_repository,
+ &null_oid, p->sm_path))) {
+ char *config_key = NULL;
+ const char *value;
+ int ignore_all = 0;
+
+ config_key = xstrfmt("submodule.%s.ignore",
+ sub->name);
+ if (!git_config_get_string_tmp(config_key, &value))
+ ignore_all = !strcmp(value, "all");
+ else if (sub->ignore)
+ ignore_all = !strcmp(sub->ignore, "all");
+
+ free(config_key);
+ if (ignore_all)
+ continue;
+ }
+
+ /* Also show added or modified modules which are checked out */
+ strbuf_addstr(&sm_gitdir, p->sm_path);
+ if (is_nonbare_repository_dir(&sm_gitdir))
+ generate_submodule_summary(info, p);
+ strbuf_release(&sm_gitdir);
+ }
+}
+
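+/*
+ * Diff format callback: collect every file pair in which at least one
+ * side is a gitlink, for the per-submodule summary generated later.
+ */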
+static void submodule_summary_callback(struct diff_queue_struct *q,
+ struct diff_options *options,
+ void *data)
+{
+ int i;
+ struct module_cb_list *list = data;
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ struct module_cb *temp;
+
+ if (!S_ISGITLINK(p->one->mode) && !S_ISGITLINK(p->two->mode))
+ continue;
+ temp = (struct module_cb*)malloc(sizeof(struct module_cb));
+ temp->mod_src = p->one->mode;
+ temp->mod_dst = p->two->mode;
+ temp->oid_src = p->one->oid;
+ temp->oid_dst = p->two->oid;
+ temp->status = p->status;
+ temp->sm_path = xstrdup(p->one->path);
+
+ ALLOC_GROW(list->entries, list->nr + 1, list->alloc);
+ list->entries[list->nr++] = temp;
+ }
+}
+
+static const char *get_diff_cmd(enum diff_cmd diff_cmd)
+{
+ switch (diff_cmd) {
+ case DIFF_INDEX: return "diff-index";
+ case DIFF_FILES: return "diff-files";
+ default: BUG("bad diff_cmd value %d", diff_cmd);
+ }
+}
+
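+/*
+ * Run the equivalent of `git diff-index --raw` (or `git diff-files`)
+ * and hand the results to the callback above, which keeps only the
+ * submodule entries; then print a summary for each collected entry.
+ */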
+static int compute_summary_module_list(struct object_id *head_oid,
+ struct summary_cb *info,
+ enum diff_cmd diff_cmd)
+{
+ struct strvec diff_args = STRVEC_INIT;
+ struct rev_info rev;
+ struct module_cb_list list = MODULE_CB_LIST_INIT;
+
+ strvec_push(&diff_args, get_diff_cmd(diff_cmd));
+ if (info->cached)
+ strvec_push(&diff_args, "--cached");
+ strvec_pushl(&diff_args, "--ignore-submodules=dirty", "--raw", NULL);
+ if (head_oid)
+ strvec_push(&diff_args, oid_to_hex(head_oid));
+ strvec_push(&diff_args, "--");
+ if (info->argc)
+ strvec_pushv(&diff_args, info->argv);
+
+ git_config(git_diff_basic_config, NULL);
+ init_revisions(&rev, info->prefix);
+ rev.abbrev = 0;
+ precompose_argv(diff_args.nr, diff_args.v);
+ setup_revisions(diff_args.nr, diff_args.v, &rev, NULL);
+ rev.diffopt.output_format = DIFF_FORMAT_NO_OUTPUT | DIFF_FORMAT_CALLBACK;
+ rev.diffopt.format_callback = submodule_summary_callback;
+ rev.diffopt.format_callback_data = &list;
+
+ if (!info->cached) {
+ if (diff_cmd == DIFF_INDEX)
+ setup_work_tree();
+ if (read_cache_preload(&rev.diffopt.pathspec) < 0) {
+ perror("read_cache_preload");
+ return -1;
+ }
+ } else if (read_cache() < 0) {
+ perror("read_cache");
+ return -1;
+ }
+
+ if (diff_cmd == DIFF_INDEX)
+ run_diff_index(&rev, info->cached);
+ else
+ run_diff_files(&rev, 0);
+ prepare_submodule_summary(info, &list);
+ strvec_clear(&diff_args);
+ return 0;
+}
+
+static int module_summary(int argc, const char **argv, const char *prefix)
+{
+ struct summary_cb info = SUMMARY_CB_INIT;
+ int cached = 0;
+ int for_status = 0;
+ int files = 0;
+ int summary_limit = -1;
+ enum diff_cmd diff_cmd = DIFF_INDEX;
+ struct object_id head_oid;
+ int ret;
+
+ struct option module_summary_options[] = {
+ OPT_BOOL(0, "cached", &cached,
+ N_("use the commit stored in the index instead of the submodule HEAD")),
+ OPT_BOOL(0, "files", &files,
+ N_("to compare the commit in the index with that in the submodule HEAD")),
+ OPT_BOOL(0, "for-status", &for_status,
+ N_("skip submodules with 'ignore_config' value set to 'all'")),
+ OPT_INTEGER('n', "summary-limit", &summary_limit,
+ N_("limit the summary size")),
+ OPT_END()
+ };
+
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule--helper summary [<options>] [commit] [--] [<path>]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_summary_options,
+ git_submodule_helper_usage, 0);
+
+ if (!summary_limit)
+ return 0;
+
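+	/*
+	 * Use the first argument as the diff base if it names a revision;
+	 * fall back to the empty tree before the first commit, or to HEAD
+	 * when the argument is really a path.
+	 */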
+ if (!get_oid(argc ? argv[0] : "HEAD", &head_oid)) {
+ if (argc) {
+ argv++;
+ argc--;
+ }
+ } else if (!argc || !strcmp(argv[0], "HEAD")) {
+ /* before the first commit: compare with an empty tree */
+ oidcpy(&head_oid, the_hash_algo->empty_tree);
+ if (argc) {
+ argv++;
+ argc--;
+ }
+ } else {
+ if (get_oid("HEAD", &head_oid))
+ die(_("could not fetch a revision for HEAD"));
+ }
+
+ if (files) {
+ if (cached)
+ die(_("--cached and --files are mutually exclusive"));
+ diff_cmd = DIFF_FILES;
+ }
+
+ info.argc = argc;
+ info.argv = argv;
+ info.prefix = prefix;
+ info.cached = !!cached;
+ info.files = !!files;
+ info.for_status = !!for_status;
+ info.summary_limit = summary_limit;
+
+ ret = compute_summary_module_list((diff_cmd == DIFF_INDEX) ? &head_oid : NULL,
+ &info, diff_cmd);
+ return ret;
+}
+
struct sync_cb {
const char *prefix;
unsigned int flags;
};
-
#define SYNC_CB_INIT { NULL, 0 }
static void sync_submodule(const char *path, const char *prefix,
@@ -1511,7 +1936,7 @@ static void determine_submodule_update_strategy(struct repository *r,
if (parse_submodule_update_strategy(update, out) < 0)
die(_("Invalid update mode '%s' for submodule path '%s'"),
update, path);
- } else if (!repo_config_get_string_const(r, key, &val)) {
+ } else if (!repo_config_get_string_tmp(r, key, &val)) {
if (parse_submodule_update_strategy(val, out) < 0)
die(_("Invalid update mode '%s' configured for submodule path '%s'"),
val, path);
@@ -1667,7 +2092,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
}
key = xstrfmt("submodule.%s.update", sub->name);
- if (!repo_config_get_string_const(the_repository, key, &update_string)) {
+ if (!repo_config_get_string_tmp(the_repository, key, &update_string)) {
update_type = parse_submodule_update_type(update_string);
} else {
update_type = sub->update_strategy.type;
@@ -1690,7 +2115,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
strbuf_reset(&sb);
strbuf_addf(&sb, "submodule.%s.url", sub->name);
- if (repo_config_get_string_const(the_repository, sb.buf, &url)) {
+ if (repo_config_get_string_tmp(the_repository, sb.buf, &url)) {
if (starts_with_dot_slash(sub->url) ||
starts_with_dot_dot_slash(sub->url)) {
url = compute_submodule_clone_url(sub->url);
@@ -1747,8 +2172,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
"--no-single-branch");
cleanup:
- strbuf_reset(&displaypath_sb);
- strbuf_reset(&sb);
+ strbuf_release(&displaypath_sb);
+ strbuf_release(&sb);
if (need_free_url)
free((void*)url);
@@ -1976,7 +2401,7 @@ static const char *remote_submodule_branch(const char *path)
return NULL;
key = xstrfmt("submodule.%s.branch", sub->name);
- if (repo_config_get_string_const(the_repository, key, &branch))
+ if (repo_config_get_string_tmp(the_repository, key, &branch))
branch = sub->branch;
free(key);
@@ -2101,7 +2526,7 @@ static int ensure_core_worktree(int argc, const char **argv, const char *prefix)
{
const struct submodule *sub;
const char *path;
- char *cw;
+ const char *cw;
struct repository subrepo;
if (argc != 2)
@@ -2116,7 +2541,7 @@ static int ensure_core_worktree(int argc, const char **argv, const char *prefix)
if (repo_submodule_init(&subrepo, the_repository, sub))
die(_("could not get a repository handle for submodule '%s'"), path);
- if (!repo_config_get_string(&subrepo, "core.worktree", &cw)) {
+ if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
char *cfg_file, *abs_path;
const char *rel_path;
struct strbuf sb = STRBUF_INIT;
@@ -2344,6 +2769,7 @@ static struct cmd_struct commands[] = {
{"print-default-remote", print_default_remote, 0},
{"sync", module_sync, SUPPORT_SUPER_PREFIX},
{"deinit", module_deinit, 0},
+ {"summary", module_summary, SUPPORT_SUPER_PREFIX},
{"remote-branch", resolve_remote_submodule_branch, 0},
{"push-check", push_check, 0},
{"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 8165343145..bb70fde97e 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -924,7 +924,6 @@ static int move_worktree(int ac, const char **av, const char *prefix)
static void check_clean_worktree(struct worktree *wt,
const char *original_path)
{
- struct strvec child_env = STRVEC_INIT;
struct child_process cp;
char buf[1];
int ret;
@@ -935,15 +934,14 @@ static void check_clean_worktree(struct worktree *wt,
*/
validate_no_submodules(wt);
- strvec_pushf(&child_env, "%s=%s/.git",
+ child_process_init(&cp);
+ strvec_pushf(&cp.env_array, "%s=%s/.git",
GIT_DIR_ENVIRONMENT, wt->path);
- strvec_pushf(&child_env, "%s=%s",
+ strvec_pushf(&cp.env_array, "%s=%s",
GIT_WORK_TREE_ENVIRONMENT, wt->path);
- memset(&cp, 0, sizeof(cp));
strvec_pushl(&cp.args, "status",
"--porcelain", "--ignore-submodules=none",
NULL);
- cp.env = child_env.v;
cp.git_cmd = 1;
cp.dir = wt->path;
cp.out = -1;