Diffstat (limited to 'builtin')
-rw-r--r--   builtin/add.c            45
-rw-r--r--   builtin/checkout.c       73
-rw-r--r--   builtin/commit.c          9
-rw-r--r--   builtin/fast-export.c     3
-rw-r--r--   builtin/grep.c           96
-rw-r--r--   builtin/merge.c           9
-rw-r--r--   builtin/pack-objects.c  243
-rw-r--r--   builtin/pull.c           13
-rw-r--r--   builtin/rebase.c         18
-rw-r--r--   builtin/reflog.c          9
-rw-r--r--   builtin/stash.c          25
11 files changed, 374 insertions(+), 169 deletions(-)
diff --git a/builtin/add.c b/builtin/add.c
index 4c38aff419..18a0881ecf 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -31,6 +31,7 @@ static int take_worktree_changes;
static int add_renormalize;
static int pathspec_file_nul;
static const char *pathspec_from_file;
+static int legacy_stash_p; /* support for the scripted `git stash` */
struct update_callback_data {
int flags;
@@ -196,12 +197,25 @@ int run_add_interactive(const char *revision, const char *patch_mode,
&use_builtin_add_i);
if (use_builtin_add_i == 1) {
+ enum add_p_mode mode;
+
if (!patch_mode)
return !!run_add_i(the_repository, pathspec);
- if (strcmp(patch_mode, "--patch"))
- die("'%s' not yet supported in the built-in add -p",
- patch_mode);
- return !!run_add_p(the_repository, pathspec);
+
+ if (!strcmp(patch_mode, "--patch"))
+ mode = ADD_P_ADD;
+ else if (!strcmp(patch_mode, "--patch=stash"))
+ mode = ADD_P_STASH;
+ else if (!strcmp(patch_mode, "--patch=reset"))
+ mode = ADD_P_RESET;
+ else if (!strcmp(patch_mode, "--patch=checkout"))
+ mode = ADD_P_CHECKOUT;
+ else if (!strcmp(patch_mode, "--patch=worktree"))
+ mode = ADD_P_WORKTREE;
+ else
+ die("'%s' not supported", patch_mode);
+
+ return !!run_add_p(the_repository, mode, revision, pathspec);
}
argv_array_push(&argv, "add--interactive");
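
The hunk above maps the --patch[=<mode>] argument onto the new add_p_mode enum with an if/else chain of strcmp() calls. For reference, the same mapping expressed as a lookup table -- a sketch only, not part of the patch; the enum values are the ones introduced above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum add_p_mode { ADD_P_ADD, ADD_P_STASH, ADD_P_RESET, ADD_P_CHECKOUT, ADD_P_WORKTREE };

static const struct {
	const char *arg;
	enum add_p_mode mode;
} patch_modes[] = {
	{ "--patch",          ADD_P_ADD },
	{ "--patch=stash",    ADD_P_STASH },
	{ "--patch=reset",    ADD_P_RESET },
	{ "--patch=checkout", ADD_P_CHECKOUT },
	{ "--patch=worktree", ADD_P_WORKTREE },
};

static enum add_p_mode parse_patch_mode(const char *patch_mode)
{
	size_t i;

	for (i = 0; i < sizeof(patch_modes) / sizeof(patch_modes[0]); i++)
		if (!strcmp(patch_mode, patch_modes[i].arg))
			return patch_modes[i].mode;
	fprintf(stderr, "'%s' not supported\n", patch_mode);
	exit(128);
}

int main(int argc, const char **argv)
{
	printf("mode %d\n", parse_patch_mode(argc > 1 ? argv[1] : "--patch"));
	return 0;
}
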
@@ -327,6 +341,8 @@ static struct option builtin_add_options[] = {
N_("override the executable bit of the listed files")),
OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo,
N_("warn when adding an embedded repository")),
+ OPT_HIDDEN_BOOL(0, "legacy-stash-p", &legacy_stash_p,
+ N_("backend for `git stash -p`")),
OPT_PATHSPEC_FROM_FILE(&pathspec_from_file),
OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul),
OPT_END(),
@@ -390,7 +406,10 @@ static int add_files(struct dir_struct *dir, int flags)
fprintf(stderr, _(ignore_error));
for (i = 0; i < dir->ignored_nr; i++)
fprintf(stderr, "%s\n", dir->ignored[i]->name);
- fprintf(stderr, _("Use -f if you really want to add them.\n"));
+ if (advice_add_ignored_file)
+ advise(_("Use -f if you really want to add them.\n"
+ "Turn this message off by running\n"
+ "\"git config advice.addIgnoredFile false\""));
exit_status = 1;
}
@@ -428,6 +447,17 @@ int cmd_add(int argc, const char **argv, const char *prefix)
die(_("--pathspec-from-file is incompatible with --interactive/--patch"));
exit(interactive_add(argc - 1, argv + 1, prefix, patch_interactive));
}
+ if (legacy_stash_p) {
+ struct pathspec pathspec;
+
+ parse_pathspec(&pathspec, 0,
+ PATHSPEC_PREFER_FULL |
+ PATHSPEC_SYMLINK_LEADING_PATH |
+ PATHSPEC_PREFIX_ORIGIN,
+ prefix, argv);
+
+ return run_add_interactive(NULL, "--patch=stash", &pathspec);
+ }
if (edit_interactive) {
if (pathspec_from_file)
@@ -480,7 +510,10 @@ int cmd_add(int argc, const char **argv, const char *prefix)
if (require_pathspec && pathspec.nr == 0) {
fprintf(stderr, _("Nothing specified, nothing added.\n"));
- fprintf(stderr, _("Maybe you wanted to say 'git add .'?\n"));
+ if (advice_add_empty_pathspec)
+ advise( _("Maybe you wanted to say 'git add .'?\n"
+ "Turn this message off by running\n"
+ "\"git config advice.addEmptyPathspec false\""));
return 0;
}
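
Both add.c hunks above follow the same pattern: an unconditional fprintf() hint becomes an advise() call gated by a new advice.* knob (advice.addIgnoredFile, advice.addEmptyPathspec). A minimal standalone sketch of that pattern, with the config plumbing simplified away and the hint text taken from the first hunk:

#include <stdio.h>

static int advice_add_ignored_file = 1;	/* advice.addIgnoredFile, default on */

static void advise(const char *msg)
{
	/* the real advise() prefixes every line with "hint: "; kept simple here */
	fprintf(stderr, "hint: %s\n", msg);
}

static void report_ignored_paths(const char **ignored, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		fprintf(stderr, "%s\n", ignored[i]);
	if (advice_add_ignored_file)
		advise("Use -f if you really want to add them.\n"
		       "Turn this message off by running\n"
		       "\"git config advice.addIgnoredFile false\"");
}

int main(void)
{
	const char *ignored[] = { "build/", "vendor/" };
	report_ignored_paths(ignored, 2);
	return 0;
}
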
diff --git a/builtin/checkout.c b/builtin/checkout.c
index b52c490c8f..fc2eb1befc 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -524,6 +524,8 @@ static int checkout_paths(const struct checkout_opts *opts,
/* Now we are committed to check them out */
if (opts->checkout_worktree)
errs |= checkout_worktree(opts);
+ else
+ remove_marked_cache_entries(&the_index, 1);
/*
* Allow updating the index when checking out from the index.
@@ -1115,12 +1117,43 @@ static void setup_new_branch_info_and_source_tree(
}
}
+static const char *parse_remote_branch(const char *arg,
+ struct object_id *rev,
+ int could_be_checkout_paths)
+{
+ int num_matches = 0;
+ const char *remote = unique_tracking_name(arg, rev, &num_matches);
+
+ if (remote && could_be_checkout_paths) {
+ die(_("'%s' could be both a local file and a tracking branch.\n"
+ "Please use -- (and optionally --no-guess) to disambiguate"),
+ arg);
+ }
+
+ if (!remote && num_matches > 1) {
+ if (advice_checkout_ambiguous_remote_branch_name) {
+ advise(_("If you meant to check out a remote tracking branch on, e.g. 'origin',\n"
+ "you can do so by fully qualifying the name with the --track option:\n"
+ "\n"
+ " git checkout --track origin/<name>\n"
+ "\n"
+ "If you'd like to always have checkouts of an ambiguous <name> prefer\n"
+ "one remote, e.g. the 'origin' remote, consider setting\n"
+ "checkout.defaultRemote=origin in your config."));
+ }
+
+ die(_("'%s' matched multiple (%d) remote tracking branches"),
+ arg, num_matches);
+ }
+
+ return remote;
+}
+
static int parse_branchname_arg(int argc, const char **argv,
int dwim_new_local_branch_ok,
struct branch_info *new_branch_info,
struct checkout_opts *opts,
- struct object_id *rev,
- int *dwim_remotes_matched)
+ struct object_id *rev)
{
const char **new_branch = &opts->new_branch;
int argcount = 0;
@@ -1225,13 +1258,9 @@ static int parse_branchname_arg(int argc, const char **argv,
recover_with_dwim = 0;
if (recover_with_dwim) {
- const char *remote = unique_tracking_name(arg, rev,
- dwim_remotes_matched);
+ const char *remote = parse_remote_branch(arg, rev,
+ could_be_checkout_paths);
if (remote) {
- if (could_be_checkout_paths)
- die(_("'%s' could be both a local file and a tracking branch.\n"
- "Please use -- (and optionally --no-guess) to disambiguate"),
- arg);
*new_branch = arg;
arg = remote;
/* DWIMmed to create local branch, case (3).(b) */
@@ -1496,7 +1525,6 @@ static int checkout_main(int argc, const char **argv, const char *prefix,
const char * const usagestr[])
{
struct branch_info new_branch_info;
- int dwim_remotes_matched = 0;
int parseopt_flags = 0;
memset(&new_branch_info, 0, sizeof(new_branch_info));
@@ -1604,8 +1632,7 @@ static int checkout_main(int argc, const char **argv, const char *prefix,
opts->track == BRANCH_TRACK_UNSPECIFIED &&
!opts->new_branch;
int n = parse_branchname_arg(argc, argv, dwim_ok,
- &new_branch_info, opts, &rev,
- &dwim_remotes_matched);
+ &new_branch_info, opts, &rev);
argv += n;
argc -= n;
} else if (!opts->accept_ref && opts->from_treeish) {
@@ -1682,28 +1709,10 @@ static int checkout_main(int argc, const char **argv, const char *prefix,
}
UNLEAK(opts);
- if (opts->patch_mode || opts->pathspec.nr) {
- int ret = checkout_paths(opts, new_branch_info.name);
- if (ret && dwim_remotes_matched > 1 &&
- advice_checkout_ambiguous_remote_branch_name)
- advise(_("'%s' matched more than one remote tracking branch.\n"
- "We found %d remotes with a reference that matched. So we fell back\n"
- "on trying to resolve the argument as a path, but failed there too!\n"
- "\n"
- "If you meant to check out a remote tracking branch on, e.g. 'origin',\n"
- "you can do so by fully qualifying the name with the --track option:\n"
- "\n"
- " git checkout --track origin/<name>\n"
- "\n"
- "If you'd like to always have checkouts of an ambiguous <name> prefer\n"
- "one remote, e.g. the 'origin' remote, consider setting\n"
- "checkout.defaultRemote=origin in your config."),
- argv[0],
- dwim_remotes_matched);
- return ret;
- } else {
+ if (opts->patch_mode || opts->pathspec.nr)
+ return checkout_paths(opts, new_branch_info.name);
+ else
return checkout_branch(opts, &new_branch_info);
- }
}
int cmd_checkout(int argc, const char **argv, const char *prefix)
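
parse_remote_branch() centralizes the DWIM disambiguation that used to be split between parse_branchname_arg() and the post-failure advice in checkout_main(). Its decision table, condensed into a sketch -- unique_tracking_name() is stubbed here; the real helper scans every remote's tracking refs and reports the match count:

#include <stdio.h>
#include <stdlib.h>

static const char *unique_tracking_name_stub(const char *arg, int *num_matches)
{
	(void)arg;
	*num_matches = 2;	/* pretend two remotes have a branch of that name */
	return NULL;
}

static const char *parse_remote_branch_sketch(const char *arg, int could_be_checkout_paths)
{
	int num_matches = 0;
	const char *remote = unique_tracking_name_stub(arg, &num_matches);

	if (remote && could_be_checkout_paths) {
		fprintf(stderr, "'%s' could be both a local file and a tracking branch\n", arg);
		exit(128);	/* ask the user for "--" (and possibly --no-guess) */
	}
	if (!remote && num_matches > 1) {
		fprintf(stderr, "'%s' matched multiple (%d) remote tracking branches\n",
			arg, num_matches);
		exit(128);	/* the advice about checkout.defaultRemote goes out first */
	}
	return remote;		/* NULL: not a DWIM candidate at all */
}

int main(void)
{
	parse_remote_branch_sketch("topic", 0);
	return 0;
}
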
diff --git a/builtin/commit.c b/builtin/commit.c
index aa1332308a..c70ad01cc9 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -367,7 +367,7 @@ static const char *prepare_index(int argc, const char **argv, const char *prefix
die(_("index file corrupt"));
if (interactive) {
- char *old_index_env = NULL;
+ char *old_index_env = NULL, *old_repo_index_file;
hold_locked_index(&index_lock, LOCK_DIE_ON_ERROR);
refresh_cache_or_die(refresh_flags);
@@ -375,12 +375,16 @@ static const char *prepare_index(int argc, const char **argv, const char *prefix
if (write_locked_index(&the_index, &index_lock, 0))
die(_("unable to create temporary index"));
+ old_repo_index_file = the_repository->index_file;
+ the_repository->index_file =
+ (char *)get_lock_file_path(&index_lock);
old_index_env = xstrdup_or_null(getenv(INDEX_ENVIRONMENT));
- setenv(INDEX_ENVIRONMENT, get_lock_file_path(&index_lock), 1);
+ setenv(INDEX_ENVIRONMENT, the_repository->index_file, 1);
if (interactive_add(argc, argv, prefix, patch_interactive) != 0)
die(_("interactive add failed"));
+ the_repository->index_file = old_repo_index_file;
if (old_index_env && *old_index_env)
setenv(INDEX_ENVIRONMENT, old_index_env, 1);
else
@@ -964,6 +968,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
*/
if (!committable && whence != FROM_MERGE && !allow_empty &&
!(amend && is_a_merge(current_head))) {
+ s->hints = advice_status_hints;
s->display_comment_prefix = old_display_comment_prefix;
run_status(stdout, index_file, prefix, 0, s);
if (amend)
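
The interactive path now repoints the_repository->index_file at the lock file in addition to exporting GIT_INDEX_FILE, so the in-process add API operates on the temporary index. The save/point/restore shape, reduced to a sketch with hypothetical stand-ins for the repository field and the add step:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *repo_index_file = "/repo/.git/index";	/* stand-in for the_repository->index_file */

static char *xstrdup_or_null(const char *s)
{
	return s ? strdup(s) : NULL;
}

static void with_temporary_index(const char *tmp_index, void (*body)(void))
{
	char *old_repo_index_file = repo_index_file;
	char *old_index_env = xstrdup_or_null(getenv("GIT_INDEX_FILE"));

	repo_index_file = (char *)tmp_index;
	setenv("GIT_INDEX_FILE", tmp_index, 1);

	body();					/* e.g. the interactive add */

	repo_index_file = old_repo_index_file;
	if (old_index_env && *old_index_env)
		setenv("GIT_INDEX_FILE", old_index_env, 1);
	else
		unsetenv("GIT_INDEX_FILE");
	free(old_index_env);
}

static void fake_add(void)
{
	printf("adding against %s\n", getenv("GIT_INDEX_FILE"));
}

int main(void)
{
	with_temporary_index("/repo/.git/index.lock", fake_add);
	return 0;
}
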
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index dbec4df92b..164406fdd4 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -870,8 +870,7 @@ static void handle_tag(const char *name, struct tag *tag)
printf("reset %s\nfrom %s\n\n",
name, oid_to_hex(&null_oid));
}
- if (starts_with(name, "refs/tags/"))
- name += 10;
+ skip_prefix(name, "refs/tags/", &name);
printf("tag %s\n", name);
if (mark_tags) {
mark_next_object(&tag->object);
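
skip_prefix() replaces the starts_with()/name += 10 pair and drops the hard-coded prefix length. A minimal reimplementation of the helper's contract as I understand it -- on a match, *out is advanced past the prefix and a non-zero value is returned; otherwise *out is left untouched:

#include <stdio.h>
#include <string.h>

static int skip_prefix_sketch(const char *str, const char *prefix, const char **out)
{
	size_t len = strlen(prefix);

	if (strncmp(str, prefix, len))
		return 0;
	*out = str + len;
	return 1;
}

int main(void)
{
	const char *name = "refs/tags/v2.25.0";

	skip_prefix_sketch(name, "refs/tags/", &name);
	printf("tag %s\n", name);	/* -> "tag v2.25.0" */
	return 0;
}
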
diff --git a/builtin/grep.c b/builtin/grep.c
index 50ce8d9461..99e2685090 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -24,6 +24,7 @@
#include "submodule.h"
#include "submodule-config.h"
#include "object-store.h"
+#include "packfile.h"
static char const * const grep_usage[] = {
N_("git grep [<options>] [-e] <pattern> [<rev>...] [[--] <path>...]"),
@@ -32,7 +33,6 @@ static char const * const grep_usage[] = {
static int recurse_submodules;
-#define GREP_NUM_THREADS_DEFAULT 8
static int num_threads;
static pthread_t *threads;
@@ -91,8 +91,11 @@ static pthread_cond_t cond_result;
static int skip_first_line;
-static void add_work(struct grep_opt *opt, const struct grep_source *gs)
+static void add_work(struct grep_opt *opt, struct grep_source *gs)
{
+ if (opt->binary != GREP_BINARY_TEXT)
+ grep_source_load_driver(gs, opt->repo->index);
+
grep_lock();
while ((todo_end+1) % ARRAY_SIZE(todo) == todo_done) {
@@ -100,9 +103,6 @@ static void add_work(struct grep_opt *opt, const struct grep_source *gs)
}
todo[todo_end].source = *gs;
- if (opt->binary != GREP_BINARY_TEXT)
- grep_source_load_driver(&todo[todo_end].source,
- opt->repo->index);
todo[todo_end].done = 0;
strbuf_reset(&todo[todo_end].out);
todo_end = (todo_end + 1) % ARRAY_SIZE(todo);
@@ -200,12 +200,12 @@ static void start_threads(struct grep_opt *opt)
int i;
pthread_mutex_init(&grep_mutex, NULL);
- pthread_mutex_init(&grep_read_mutex, NULL);
pthread_mutex_init(&grep_attr_mutex, NULL);
pthread_cond_init(&cond_add, NULL);
pthread_cond_init(&cond_write, NULL);
pthread_cond_init(&cond_result, NULL);
grep_use_locks = 1;
+ enable_obj_read_lock();
for (i = 0; i < ARRAY_SIZE(todo); i++) {
strbuf_init(&todo[i].out, 0);
@@ -257,12 +257,12 @@ static int wait_all(void)
free(threads);
pthread_mutex_destroy(&grep_mutex);
- pthread_mutex_destroy(&grep_read_mutex);
pthread_mutex_destroy(&grep_attr_mutex);
pthread_cond_destroy(&cond_add);
pthread_cond_destroy(&cond_write);
pthread_cond_destroy(&cond_result);
grep_use_locks = 0;
+ disable_obj_read_lock();
return hit;
}
@@ -295,16 +295,6 @@ static int grep_cmd_config(const char *var, const char *value, void *cb)
return st;
}
-static void *lock_and_read_oid_file(const struct object_id *oid, enum object_type *type, unsigned long *size)
-{
- void *data;
-
- grep_read_lock();
- data = read_object_file(oid, type, size);
- grep_read_unlock();
- return data;
-}
-
static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
const char *filename, int tree_name_len,
const char *path)
@@ -407,30 +397,28 @@ static int grep_submodule(struct grep_opt *opt,
{
struct repository subrepo;
struct repository *superproject = opt->repo;
- const struct submodule *sub = submodule_from_path(superproject,
- &null_oid, path);
+ const struct submodule *sub;
struct grep_opt subopt;
int hit;
- /*
- * NEEDSWORK: submodules functions need to be protected because they
- * access the object store via config_from_gitmodules(): the latter
- * uses get_oid() which, for now, relies on the global the_repository
- * object.
- */
- grep_read_lock();
+ sub = submodule_from_path(superproject, &null_oid, path);
- if (!is_submodule_active(superproject, path)) {
- grep_read_unlock();
+ if (!is_submodule_active(superproject, path))
return 0;
- }
- if (repo_submodule_init(&subrepo, superproject, sub)) {
- grep_read_unlock();
+ if (repo_submodule_init(&subrepo, superproject, sub))
return 0;
- }
- repo_read_gitmodules(&subrepo);
+ /*
+ * NEEDSWORK: repo_read_gitmodules() might call
+ * add_to_alternates_memory() via config_from_gitmodules(). This
+ * operation causes a race condition with concurrent object readings
+ * performed by the worker threads. That's why we need obj_read_lock()
+ * here. It should be removed once it's no longer necessary to add the
+ * subrepo's odbs to the in-memory alternates list.
+ */
+ obj_read_lock();
+ repo_read_gitmodules(&subrepo, 0);
/*
* NEEDSWORK: This adds the submodule's object directory to the list of
@@ -443,7 +431,7 @@ static int grep_submodule(struct grep_opt *opt,
* object.
*/
add_to_alternates_memory(subrepo.objects->odb->path);
- grep_read_unlock();
+ obj_read_unlock();
memcpy(&subopt, opt, sizeof(subopt));
subopt.repo = &subrepo;
@@ -455,14 +443,12 @@ static int grep_submodule(struct grep_opt *opt,
unsigned long size;
struct strbuf base = STRBUF_INIT;
+ obj_read_lock();
object = parse_object_or_die(oid, oid_to_hex(oid));
-
- grep_read_lock();
+ obj_read_unlock();
data = read_object_with_reference(&subrepo,
&object->oid, tree_type,
&size, NULL);
- grep_read_unlock();
-
if (!data)
die(_("unable to read tree (%s)"), oid_to_hex(&object->oid));
@@ -587,7 +573,7 @@ static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec,
void *data;
unsigned long size;
- data = lock_and_read_oid_file(&entry.oid, &type, &size);
+ data = read_object_file(&entry.oid, &type, &size);
if (!data)
die(_("unable to read tree (%s)"),
oid_to_hex(&entry.oid));
@@ -625,12 +611,9 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
struct strbuf base;
int hit, len;
- grep_read_lock();
data = read_object_with_reference(opt->repo,
&obj->oid, tree_type,
&size, NULL);
- grep_read_unlock();
-
if (!data)
die(_("unable to read tree (%s)"), oid_to_hex(&obj->oid));
@@ -659,13 +642,18 @@ static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
for (i = 0; i < nr; i++) {
struct object *real_obj;
+
+ obj_read_lock();
real_obj = deref_tag(opt->repo, list->objects[i].item,
NULL, 0);
+ obj_read_unlock();
/* load the gitmodules file for this rev */
if (recurse_submodules) {
submodule_free(opt->repo);
+ obj_read_lock();
gitmodules_config_oid(&real_obj->oid);
+ obj_read_unlock();
}
if (grep_object(opt, pathspec, real_obj, list->objects[i].name,
list->objects[i].path)) {
@@ -958,6 +946,9 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
/* die the same way as if we did it at the beginning */
setup_git_directory();
}
+ /* Ignore --recurse-submodules if --no-index is given or implied */
+ if (!use_index)
+ recurse_submodules = 0;
/*
* skip a -- separator; we know it cannot be
@@ -1062,7 +1053,10 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
pathspec.recursive = 1;
pathspec.recurse_submodules = !!recurse_submodules;
- if (list.nr || cached || show_in_pager) {
+ if (recurse_submodules && untracked)
+ die(_("--untracked not supported with --recurse-submodules"));
+
+ if (show_in_pager) {
if (num_threads > 1)
warning(_("invalid option combination, ignoring --threads"));
num_threads = 1;
@@ -1072,7 +1066,7 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
} else if (num_threads < 0)
die(_("invalid number of threads specified (%d)"), num_threads);
else if (num_threads == 0)
- num_threads = HAVE_THREADS ? GREP_NUM_THREADS_DEFAULT : 1;
+ num_threads = HAVE_THREADS ? online_cpus() : 1;
if (num_threads > 1) {
if (!HAVE_THREADS)
@@ -1081,6 +1075,17 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
&& (opt.pre_context || opt.post_context ||
opt.file_break || opt.funcbody))
skip_first_line = 1;
+
+ /*
+ * Pre-read gitmodules (if not read already) and force eager
+ * initialization of packed_git to prevent racy lazy
+ * reading/initialization once worker threads are started.
+ */
+ if (recurse_submodules)
+ repo_read_gitmodules(the_repository, 1);
+ if (startup_info->have_repository)
+ (void)get_packed_git(the_repository);
+
start_threads(&opt);
} else {
/*
@@ -1115,9 +1120,6 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
}
}
- if (recurse_submodules && (!use_index || untracked))
- die(_("option not supported with --recurse-submodules"));
-
if (!show_in_pager && !opt.status_only)
setup_pager();
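
grep's private grep_read_mutex is gone; object reads are now serialized by a global lock that callers switch on only while worker threads exist (enable_obj_read_lock() / obj_read_lock()). The general shape of such an opt-in lock, sketched with plain pthreads -- a non-recursive simplification with names of my own; the real lock also lets the owning thread re-enter:

#include <pthread.h>

static pthread_mutex_t read_mutex = PTHREAD_MUTEX_INITIALIZER;
static int read_lock_enabled;	/* off by default: single-threaded path pays nothing */

static void enable_read_lock(void)  { read_lock_enabled = 1; }
static void disable_read_lock(void) { read_lock_enabled = 0; }

static void read_lock(void)
{
	if (read_lock_enabled)
		pthread_mutex_lock(&read_mutex);
}

static void read_unlock(void)
{
	if (read_lock_enabled)
		pthread_mutex_unlock(&read_mutex);
}

int main(void)
{
	enable_read_lock();
	read_lock();
	/* ... read objects from worker threads ... */
	read_unlock();
	disable_read_lock();
	return 0;
}
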
diff --git a/builtin/merge.c b/builtin/merge.c
index 062e911441..d127d2225f 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -62,6 +62,7 @@ static int show_diffstat = 1, shortlog_len = -1, squash;
static int option_commit = -1;
static int option_edit = -1;
static int allow_trivial = 1, have_message, verify_signatures;
+static int check_trust_level = 1;
static int overwrite_ignore = 1;
static struct strbuf merge_msg = STRBUF_INIT;
static struct strategy **use_strategies;
@@ -631,6 +632,8 @@ static int git_merge_config(const char *k, const char *v, void *cb)
} else if (!strcmp(k, "commit.gpgsign")) {
sign_commit = git_config_bool(k, v) ? "" : NULL;
return 0;
+ } else if (!strcmp(k, "gpg.mintrustlevel")) {
+ check_trust_level = 0;
}
status = fmt_merge_msg_config(k, v, cb);
@@ -1397,7 +1400,8 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
die(_("Can merge only exactly one commit into empty head"));
if (verify_signatures)
- verify_merge_signature(remoteheads->item, verbosity);
+ verify_merge_signature(remoteheads->item, verbosity,
+ check_trust_level);
remote_head_oid = &remoteheads->item->object.oid;
read_empty(remote_head_oid, 0);
@@ -1420,7 +1424,8 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
if (verify_signatures) {
for (p = remoteheads; p; p = p->next) {
- verify_merge_signature(p->item, verbosity);
+ verify_merge_signature(p->item, verbosity,
+ check_trust_level);
}
}
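
Seeing gpg.minTrustLevel in the config flips check_trust_level off; the idea, as I read the change, is that the signature-verification layer now enforces the configured minimum itself, so merge no longer applies its historical rejection of undefined/never trust on top. The flag handling in isolation, as a sketch rather than git's code:

#include <stdio.h>
#include <string.h>

static int check_trust_level = 1;	/* caller-side trust check, on by default */

static int merge_config_sketch(const char *k, const char *v)
{
	(void)v;
	if (!strcmp(k, "gpg.mintrustlevel"))
		check_trust_level = 0;	/* defer to the gpg layer */
	return 0;
}

int main(void)
{
	merge_config_sketch("gpg.mintrustlevel", "marginal");
	printf("caller-side trust check: %s\n", check_trust_level ? "on" : "off");
	return 0;
}
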
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 393c20a2d7..b1998202fb 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -92,10 +92,11 @@ static struct progress *progress_state;
static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
-static off_t reuse_packfile_offset;
+static struct bitmap *reuse_packfile_bitmap;
static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
+static int allow_pack_reuse = 1;
static enum {
WRITE_BITMAP_FALSE = 0,
WRITE_BITMAP_QUIET,
@@ -784,57 +785,185 @@ static struct object_entry **compute_write_order(void)
return wo;
}
-static off_t write_reused_pack(struct hashfile *f)
+
+/*
+ * A reused set of objects. All objects in a chunk have the same
+ * relative position in the original packfile and the generated
+ * packfile.
+ */
+
+static struct reused_chunk {
+ /* The offset of the first object of this chunk in the original
+ * packfile. */
+ off_t original;
+ /* The offset of the first object of this chunk in the generated
+ * packfile minus "original". */
+ off_t difference;
+} *reused_chunks;
+static int reused_chunks_nr;
+static int reused_chunks_alloc;
+
+static void record_reused_object(off_t where, off_t offset)
+{
+ if (reused_chunks_nr && reused_chunks[reused_chunks_nr-1].difference == offset)
+ return;
+
+ ALLOC_GROW(reused_chunks, reused_chunks_nr + 1,
+ reused_chunks_alloc);
+ reused_chunks[reused_chunks_nr].original = where;
+ reused_chunks[reused_chunks_nr].difference = offset;
+ reused_chunks_nr++;
+}
+
+/*
+ * Binary search to find the chunk that "where" is in. Note
+ * that we're not looking for an exact match, just the first
+ * chunk that contains it (which implicitly ends at the start
+ * of the next chunk).
+ */
+static off_t find_reused_offset(off_t where)
+{
+ int lo = 0, hi = reused_chunks_nr;
+ while (lo < hi) {
+ int mi = lo + ((hi - lo) / 2);
+ if (where == reused_chunks[mi].original)
+ return reused_chunks[mi].difference;
+ if (where < reused_chunks[mi].original)
+ hi = mi;
+ else
+ lo = mi + 1;
+ }
+
+ /*
+ * The first chunk starts at zero, so we can't have gone below
+ * there.
+ */
+ assert(lo);
+ return reused_chunks[lo-1].difference;
+}
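
Each reused_chunk records where a run of copied objects started in the source pack ("original") and how far that run has shifted ("difference" = source offset minus output offset). A worked example with made-up numbers, using the same search as the function above:

#include <assert.h>
#include <stdio.h>

/* Two chunks: the first starts right after the 12-byte pack header with no
 * shift; by the second, 100 bytes of the source pack were skipped, so its
 * objects land 100 bytes earlier in the output (difference = +100). */
static struct { long long original, difference; } chunks[] = {
	{ 12,   0   },
	{ 4096, 100 },
};
static int chunks_nr = 2;

static long long find_fixup(long long where)
{
	int lo = 0, hi = chunks_nr;

	while (lo < hi) {
		int mi = lo + (hi - lo) / 2;
		if (where == chunks[mi].original)
			return chunks[mi].difference;
		if (where < chunks[mi].original)
			hi = mi;
		else
			lo = mi + 1;
	}
	assert(lo);
	return chunks[lo - 1].difference;
}

int main(void)
{
	/* A delta at source offset 5000 whose base sits at source offset 2000:
	 * fixup = 100 - 0, so the on-disk distance shrinks from 3000 to 2900. */
	printf("fixup at 2000: %lld\n", find_fixup(2000));	/* 0 */
	printf("fixup at 5000: %lld\n", find_fixup(5000));	/* 100 */
	return 0;
}
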
+
+static void write_reused_pack_one(size_t pos, struct hashfile *out,
+ struct pack_window **w_curs)
{
- unsigned char buffer[8192];
- off_t to_write, total;
- int fd;
+ off_t offset, next, cur;
+ enum object_type type;
+ unsigned long size;
- if (!is_pack_valid(reuse_packfile))
- die(_("packfile is invalid: %s"), reuse_packfile->pack_name);
+ offset = reuse_packfile->revindex[pos].offset;
+ next = reuse_packfile->revindex[pos + 1].offset;
- fd = git_open(reuse_packfile->pack_name);
- if (fd < 0)
- die_errno(_("unable to open packfile for reuse: %s"),
- reuse_packfile->pack_name);
+ record_reused_object(offset, offset - hashfile_total(out));
- if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
- die_errno(_("unable to seek in reused packfile"));
+ cur = offset;
+ type = unpack_object_header(reuse_packfile, w_curs, &cur, &size);
+ assert(type >= 0);
- if (reuse_packfile_offset < 0)
- reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
+ if (type == OBJ_OFS_DELTA) {
+ off_t base_offset;
+ off_t fixup;
+
+ unsigned char header[MAX_PACK_OBJECT_HEADER];
+ unsigned len;
+
+ base_offset = get_delta_base(reuse_packfile, w_curs, &cur, type, offset);
+ assert(base_offset != 0);
+
+ /* Convert to REF_DELTA if we must... */
+ if (!allow_ofs_delta) {
+ int base_pos = find_revindex_position(reuse_packfile, base_offset);
+ const unsigned char *base_sha1 =
+ nth_packed_object_sha1(reuse_packfile,
+ reuse_packfile->revindex[base_pos].nr);
+
+ len = encode_in_pack_object_header(header, sizeof(header),
+ OBJ_REF_DELTA, size);
+ hashwrite(out, header, len);
+ hashwrite(out, base_sha1, 20);
+ copy_pack_data(out, reuse_packfile, w_curs, cur, next - cur);
+ return;
+ }
- total = to_write = reuse_packfile_offset - sizeof(struct pack_header);
+ /* Otherwise see if we need to rewrite the offset... */
+ fixup = find_reused_offset(offset) -
+ find_reused_offset(base_offset);
+ if (fixup) {
+ unsigned char ofs_header[10];
+ unsigned i, ofs_len;
+ off_t ofs = offset - base_offset - fixup;
- while (to_write) {
- int read_pack = xread(fd, buffer, sizeof(buffer));
+ len = encode_in_pack_object_header(header, sizeof(header),
+ OBJ_OFS_DELTA, size);
- if (read_pack <= 0)
- die_errno(_("unable to read from reused packfile"));
+ i = sizeof(ofs_header) - 1;
+ ofs_header[i] = ofs & 127;
+ while (ofs >>= 7)
+ ofs_header[--i] = 128 | (--ofs & 127);
- if (read_pack > to_write)
- read_pack = to_write;
+ ofs_len = sizeof(ofs_header) - i;
- hashwrite(f, buffer, read_pack);
- to_write -= read_pack;
+ hashwrite(out, header, len);
+ hashwrite(out, ofs_header + sizeof(ofs_header) - ofs_len, ofs_len);
+ copy_pack_data(out, reuse_packfile, w_curs, cur, next - cur);
+ return;
+ }
+
+ /* ...otherwise we have no fixup, and can write it verbatim */
+ }
+
+ copy_pack_data(out, reuse_packfile, w_curs, offset, next - offset);
+}
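
The ofs_header loop above emits the OFS_DELTA distance encoding: a big-endian base-128 varint in which every continuation byte implies an extra +1, so for example 128 takes two bytes (0x80 0x00). A round-trip sketch of that encoding and the matching decode, written to make the fixup arithmetic easier to check -- illustrative code, not lifted from git:

#include <assert.h>
#include <stdio.h>

static unsigned encode_ofs(unsigned long ofs, unsigned char *buf10)
{
	unsigned i = 9;			/* fill from the end, like the patch */

	buf10[i] = ofs & 127;
	while (ofs >>= 7)
		buf10[--i] = 128 | (--ofs & 127);
	return 10 - i;			/* length; bytes start at buf10[10 - length] */
}

static unsigned long decode_ofs(const unsigned char *p)
{
	unsigned long ofs = *p & 127;

	while (*p++ & 128)
		ofs = ((ofs + 1) << 7) + (*p & 127);
	return ofs;
}

int main(void)
{
	unsigned char buf[10];
	unsigned long in = 123456789, out;
	unsigned len = encode_ofs(in, buf);

	out = decode_ofs(buf + 10 - len);
	assert(in == out);
	printf("%lu -> %u bytes -> %lu\n", in, len, out);
	return 0;
}
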
+
+static size_t write_reused_pack_verbatim(struct hashfile *out,
+ struct pack_window **w_curs)
+{
+ size_t pos = 0;
+
+ while (pos < reuse_packfile_bitmap->word_alloc &&
+ reuse_packfile_bitmap->words[pos] == (eword_t)~0)
+ pos++;
+
+ if (pos) {
+ off_t to_write;
+
+ written = (pos * BITS_IN_EWORD);
+ to_write = reuse_packfile->revindex[written].offset
+ - sizeof(struct pack_header);
+
+ /* We're recording one chunk, not one object. */
+ record_reused_object(sizeof(struct pack_header), 0);
+ hashflush(out);
+ copy_pack_data(out, reuse_packfile, w_curs,
+ sizeof(struct pack_header), to_write);
- /*
- * We don't know the actual number of objects written,
- * only how many bytes written, how many bytes total, and
- * how many objects total. So we can fake it by pretending all
- * objects we are writing are the same size. This gives us a
- * smooth progress meter, and at the end it matches the true
- * answer.
- */
- written = reuse_packfile_objects *
- (((double)(total - to_write)) / total);
display_progress(progress_state, written);
}
+ return pos;
+}
+
+static void write_reused_pack(struct hashfile *f)
+{
+ size_t i = 0;
+ uint32_t offset;
+ struct pack_window *w_curs = NULL;
+
+ if (allow_ofs_delta)
+ i = write_reused_pack_verbatim(f, &w_curs);
+
+ for (; i < reuse_packfile_bitmap->word_alloc; ++i) {
+ eword_t word = reuse_packfile_bitmap->words[i];
+ size_t pos = (i * BITS_IN_EWORD);
- close(fd);
- written = reuse_packfile_objects;
- display_progress(progress_state, written);
- return reuse_packfile_offset - sizeof(struct pack_header);
+ for (offset = 0; offset < BITS_IN_EWORD; ++offset) {
+ if ((word >> offset) == 0)
+ break;
+
+ offset += ewah_bit_ctz64(word >> offset);
+ write_reused_pack_one(pos + offset, f, &w_curs);
+ display_progress(progress_state, ++written);
+ }
+ }
+
+ unuse_pack(&w_curs);
}
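
write_reused_pack() walks the reuse bitmap one 64-bit word at a time, skipping runs of zero bits with a count-trailing-zeros step. The same bit-walking idiom in isolation, using GCC/Clang's __builtin_ctzll here; git's ewah_bit_ctz64() plays that role and carries a portable fallback:

#include <stdint.h>
#include <stdio.h>

static void for_each_set_bit(uint64_t word, void (*fn)(unsigned bit))
{
	unsigned offset;

	for (offset = 0; offset < 64; offset++) {
		if ((word >> offset) == 0)
			break;				/* no higher bits set */
		offset += __builtin_ctzll(word >> offset);
		fn(offset);
	}
}

static void show(unsigned bit)
{
	printf("bit %u\n", bit);
}

int main(void)
{
	for_each_set_bit(((uint64_t)1 << 0) | ((uint64_t)1 << 3) | ((uint64_t)1 << 63), show);
	return 0;
}
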
static const char no_split_warning[] = N_(
@@ -867,11 +996,9 @@ static void write_pack_file(void)
offset = write_pack_header(f, nr_remaining);
if (reuse_packfile) {
- off_t packfile_size;
assert(pack_to_stdout);
-
- packfile_size = write_reused_pack(f);
- offset += packfile_size;
+ write_reused_pack(f);
+ offset = hashfile_total(f);
}
nr_written = 0;
@@ -1000,6 +1127,10 @@ static int have_duplicate_entry(const struct object_id *oid,
{
struct object_entry *entry;
+ if (reuse_packfile_bitmap &&
+ bitmap_walk_contains(bitmap_git, reuse_packfile_bitmap, oid))
+ return 1;
+
entry = packlist_find(&to_pack, oid);
if (!entry)
return 0;
@@ -2552,6 +2683,13 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size,
free(p);
}
+static int obj_is_packed(const struct object_id *oid)
+{
+ return packlist_find(&to_pack, oid) ||
+ (reuse_packfile_bitmap &&
+ bitmap_walk_contains(bitmap_git, reuse_packfile_bitmap, oid));
+}
+
static void add_tag_chain(const struct object_id *oid)
{
struct tag *tag;
@@ -2563,7 +2701,7 @@ static void add_tag_chain(const struct object_id *oid)
* it was included via bitmaps, we would not have parsed it
* previously).
*/
- if (packlist_find(&to_pack, oid))
+ if (obj_is_packed(oid))
return;
tag = lookup_tag(the_repository, oid);
@@ -2587,7 +2725,7 @@ static int add_ref_tag(const char *path, const struct object_id *oid, int flag,
if (starts_with(path, "refs/tags/") && /* is a tag? */
!peel_ref(path, &peeled) && /* peelable? */
- packlist_find(&to_pack, &peeled)) /* object packed? */
+ obj_is_packed(&peeled)) /* object packed? */
add_tag_chain(oid);
return 0;
}
@@ -2655,6 +2793,7 @@ static void prepare_pack(int window, int depth)
if (nr_deltas && n > 1) {
unsigned nr_done = 0;
+
if (progress)
progress_state = start_progress(_("Compressing objects"),
nr_deltas);
@@ -2699,6 +2838,10 @@ static int git_pack_config(const char *k, const char *v, void *cb)
use_bitmap_index_default = git_config_bool(k, v);
return 0;
}
+ if (!strcmp(k, "pack.allowpackreuse")) {
+ allow_pack_reuse = git_config_bool(k, v);
+ return 0;
+ }
if (!strcmp(k, "pack.threads")) {
delta_search_threads = git_config_int(k, v);
if (delta_search_threads < 0)
@@ -3030,8 +3173,8 @@ static void loosen_unused_packed_objects(void)
*/
static int pack_options_allow_reuse(void)
{
- return pack_to_stdout &&
- allow_ofs_delta &&
+ return allow_pack_reuse &&
+ pack_to_stdout &&
!ignore_packed_keep_on_disk &&
!ignore_packed_keep_in_core &&
(!local || !have_non_local_packs) &&
@@ -3048,7 +3191,7 @@ static int get_object_list_from_bitmap(struct rev_info *revs)
bitmap_git,
&reuse_packfile,
&reuse_packfile_objects,
- &reuse_packfile_offset)) {
+ &reuse_packfile_bitmap)) {
assert(reuse_packfile_objects);
nr_result += reuse_packfile_objects;
display_progress(progress_state, nr_result);
@@ -3509,7 +3652,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (progress)
fprintf_ln(stderr,
_("Total %"PRIu32" (delta %"PRIu32"),"
- " reused %"PRIu32" (delta %"PRIu32")"),
- written, written_delta, reused, reused_delta);
+ " reused %"PRIu32" (delta %"PRIu32"),"
+ " pack-reused %"PRIu32),
+ written, written_delta, reused, reused_delta,
+ reuse_packfile_objects);
return 0;
}
diff --git a/builtin/pull.c b/builtin/pull.c
index d25ff13a60..d4e3e77c8e 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -107,6 +107,7 @@ static char *opt_ff;
static char *opt_verify_signatures;
static int opt_autostash = -1;
static int config_autostash;
+static int check_trust_level = 1;
static struct argv_array opt_strategies = ARGV_ARRAY_INIT;
static struct argv_array opt_strategy_opts = ARGV_ARRAY_INIT;
static char *opt_gpg_sign;
@@ -355,6 +356,8 @@ static enum rebase_type config_get_rebase(void)
*/
static int git_pull_config(const char *var, const char *value, void *cb)
{
+ int status;
+
if (!strcmp(var, "rebase.autostash")) {
config_autostash = git_config_bool(var, value);
return 0;
@@ -362,7 +365,14 @@ static int git_pull_config(const char *var, const char *value, void *cb)
recurse_submodules = git_config_bool(var, value) ?
RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
return 0;
+ } else if (!strcmp(var, "gpg.mintrustlevel")) {
+ check_trust_level = 0;
}
+
+ status = git_gpg_config(var, value, cb);
+ if (status)
+ return status;
+
return git_default_config(var, value, cb);
}
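
git_pull_config() now chains through git_gpg_config() before falling back to git_default_config(), so gpg.* keys (including the new gpg.minTrustLevel) are honoured when pull verifies signatures itself. The chaining shape, sketched with hypothetical handlers of my own:

#include <stdio.h>
#include <string.h>

static int gpg_config_sketch(const char *k, const char *v)
{
	if (!strcmp(k, "gpg.mintrustlevel"))
		printf("gpg layer consumes %s=%s\n", k, v);
	return 0;
}

static int default_config_sketch(const char *k, const char *v)
{
	printf("default handler sees %s=%s\n", k, v);
	return 0;
}

static int pull_config_sketch(const char *k, const char *v)
{
	int status;

	if (!strcmp(k, "rebase.autostash"))
		return 0;			/* handled locally */

	status = gpg_config_sketch(k, v);
	if (status)
		return status;			/* stop on error */
	return default_config_sketch(k, v);
}

int main(void)
{
	pull_config_sketch("gpg.mintrustlevel", "marginal");
	return 0;
}
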
@@ -587,7 +597,8 @@ static int pull_into_void(const struct object_id *merge_head,
die(_("unable to access commit %s"),
oid_to_hex(merge_head));
- verify_merge_signature(commit, opt_verbosity);
+ verify_merge_signature(commit, opt_verbosity,
+ check_trust_level);
}
/*
diff --git a/builtin/rebase.c b/builtin/rebase.c
index 8081741f8a..6154ad8fa5 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -246,21 +246,17 @@ static int edit_todo_file(unsigned flags)
}
static int get_revision_ranges(struct commit *upstream, struct commit *onto,
- const char **head_hash,
+ struct object_id *orig_head, const char **head_hash,
char **revisions, char **shortrevisions)
{
struct commit *base_rev = upstream ? upstream : onto;
const char *shorthead;
- struct object_id orig_head;
-
- if (get_oid("HEAD", &orig_head))
- return error(_("no HEAD?"));
- *head_hash = find_unique_abbrev(&orig_head, GIT_MAX_HEXSZ);
+ *head_hash = find_unique_abbrev(orig_head, GIT_MAX_HEXSZ);
*revisions = xstrfmt("%s...%s", oid_to_hex(&base_rev->object.oid),
*head_hash);
- shorthead = find_unique_abbrev(&orig_head, DEFAULT_ABBREV);
+ shorthead = find_unique_abbrev(orig_head, DEFAULT_ABBREV);
if (upstream) {
const char *shortrev;
@@ -314,12 +310,8 @@ static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
struct replay_opts replay = get_replay_opts(opts);
struct string_list commands = STRING_LIST_INIT_DUP;
- if (prepare_branch_to_be_rebased(the_repository, &replay,
- opts->switch_to))
- return -1;
-
- if (get_revision_ranges(opts->upstream, opts->onto, &head_hash,
- &revisions, &shortrevisions))
+ if (get_revision_ranges(opts->upstream, opts->onto, &opts->orig_head,
+ &head_hash, &revisions, &shortrevisions))
return -1;
if (init_basic_state(&replay,
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 4d3430900d..81dfd563c0 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -560,15 +560,16 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
for (i = 1; i < argc; i++) {
const char *arg = argv[i];
+
if (!strcmp(arg, "--dry-run") || !strcmp(arg, "-n"))
flags |= EXPIRE_REFLOGS_DRY_RUN;
- else if (starts_with(arg, "--expire=")) {
- if (parse_expiry_date(arg + 9, &cb.cmd.expire_total))
+ else if (skip_prefix(arg, "--expire=", &arg)) {
+ if (parse_expiry_date(arg, &cb.cmd.expire_total))
die(_("'%s' is not a valid timestamp"), arg);
explicit_expiry |= EXPIRE_TOTAL;
}
- else if (starts_with(arg, "--expire-unreachable=")) {
- if (parse_expiry_date(arg + 21, &cb.cmd.expire_unreachable))
+ else if (skip_prefix(arg, "--expire-unreachable=", &arg)) {
+ if (parse_expiry_date(arg, &cb.cmd.expire_unreachable))
die(_("'%s' is not a valid timestamp"), arg);
explicit_expiry |= EXPIRE_UNREACH;
}
diff --git a/builtin/stash.c b/builtin/stash.c
index 4ad3adf4ba..879fc5f368 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -998,9 +998,9 @@ static int stash_patch(struct stash_info *info, const struct pathspec *ps,
{
int ret = 0;
struct child_process cp_read_tree = CHILD_PROCESS_INIT;
- struct child_process cp_add_i = CHILD_PROCESS_INIT;
struct child_process cp_diff_tree = CHILD_PROCESS_INIT;
struct index_state istate = { NULL };
+ char *old_index_env = NULL, *old_repo_index_file;
remove_path(stash_index_path.buf);
@@ -1014,16 +1014,19 @@ static int stash_patch(struct stash_info *info, const struct pathspec *ps,
}
/* Find out what the user wants. */
- cp_add_i.git_cmd = 1;
- argv_array_pushl(&cp_add_i.args, "add--interactive", "--patch=stash",
- "--", NULL);
- add_pathspecs(&cp_add_i.args, ps);
- argv_array_pushf(&cp_add_i.env_array, "GIT_INDEX_FILE=%s",
- stash_index_path.buf);
- if (run_command(&cp_add_i)) {
- ret = -1;
- goto done;
- }
+ old_repo_index_file = the_repository->index_file;
+ the_repository->index_file = stash_index_path.buf;
+ old_index_env = xstrdup_or_null(getenv(INDEX_ENVIRONMENT));
+ setenv(INDEX_ENVIRONMENT, the_repository->index_file, 1);
+
+ ret = run_add_interactive(NULL, "--patch=stash", ps);
+
+ the_repository->index_file = old_repo_index_file;
+ if (old_index_env && *old_index_env)
+ setenv(INDEX_ENVIRONMENT, old_index_env, 1);
+ else
+ unsetenv(INDEX_ENVIRONMENT);
+ FREE_AND_NULL(old_index_env);
/* State of the working tree. */
if (write_index_as_tree(&info->w_tree, &istate, stash_index_path.buf, 0,