Diffstat (limited to 'builtin/fetch.c')
-rw-r--r-- | builtin/fetch.c | 304
1 file changed, 230 insertions, 74 deletions
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 717dd14e89..863c858fde 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -7,6 +7,7 @@
 #include "refs.h"
 #include "refspec.h"
 #include "object-store.h"
+#include "oidset.h"
 #include "commit.h"
 #include "builtin.h"
 #include "string-list.h"
@@ -23,6 +24,9 @@
 #include "packfile.h"
 #include "list-objects-filter-options.h"
 #include "commit-reach.h"
+#include "branch.h"
+#include "promisor-remote.h"
+#include "commit-graph.h"
 
 #define FORCED_UPDATES_DELAY_WARNING_IN_MS (10 * 1000)
 
@@ -50,11 +54,13 @@ static int fetch_prune_tags_config = -1; /* unspecified */
 static int prune_tags = -1; /* unspecified */
 #define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
 
-static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative;
+static int all, append, dry_run, force, keep, multiple, update_head_ok;
+static int verbosity, deepen_relative, set_upstream;
 static int progress = -1;
 static int enable_auto_gc = 1;
 static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
-static int max_children = 1;
+static int max_jobs = -1, submodule_fetch_jobs_config = -1;
+static int fetch_parallel_config = 1;
 static enum transport_family family;
 static const char *depth;
 static const char *deepen_since;
@@ -96,13 +102,20 @@ static int git_fetch_config(const char *k, const char *v, void *cb)
 	}
 
 	if (!strcmp(k, "submodule.fetchjobs")) {
-		max_children = parse_submodule_fetchjobs(k, v);
+		submodule_fetch_jobs_config = parse_submodule_fetchjobs(k, v);
 		return 0;
 	} else if (!strcmp(k, "fetch.recursesubmodules")) {
 		recurse_submodules = parse_fetch_recurse_submodules_arg(k, v);
 		return 0;
 	}
 
+	if (!strcmp(k, "fetch.parallel")) {
+		fetch_parallel_config = git_config_int(k, v);
+		if (fetch_parallel_config < 0)
+			die(_("fetch.parallel cannot be negative"));
+		return 0;
+	}
+
 	return git_default_config(k, v, cb);
 }
 
@@ -123,6 +136,8 @@ static struct option builtin_fetch_options[] = {
 	OPT__VERBOSITY(&verbosity),
 	OPT_BOOL(0, "all", &all,
 		 N_("fetch from all remotes")),
+	OPT_BOOL(0, "set-upstream", &set_upstream,
+		 N_("set upstream for git pull/fetch")),
 	OPT_BOOL('a', "append", &append,
 		 N_("append to .git/FETCH_HEAD instead of overwriting")),
 	OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
@@ -134,7 +149,7 @@ static struct option builtin_fetch_options[] = {
 		    N_("fetch all tags and associated objects"), TAGS_SET),
 	OPT_SET_INT('n', NULL, &tags,
 		    N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
-	OPT_INTEGER('j', "jobs", &max_children,
+	OPT_INTEGER('j', "jobs", &max_jobs,
 		    N_("number of submodules fetched in parallel")),
 	OPT_BOOL('p', "prune", &prune,
 		 N_("prune remote-tracking branches no longer on remote")),
@@ -239,32 +254,31 @@ static void add_merge_config(struct ref **head,
 	}
 }
 
-static int will_fetch(struct ref **head, const unsigned char *sha1)
+static void create_fetch_oidset(struct ref **head, struct oidset *out)
 {
 	struct ref *rm = *head;
 	while (rm) {
-		if (hasheq(rm->old_oid.hash, sha1))
-			return 1;
+		oidset_insert(out, &rm->old_oid);
 		rm = rm->next;
 	}
-	return 0;
 }
 
 struct refname_hash_entry {
-	struct hashmap_entry ent; /* must be the first member */
+	struct hashmap_entry ent;
 	struct object_id oid;
 	int ignore;
 	char refname[FLEX_ARRAY];
 };
 
 static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data,
-				  const void *e1_,
-				  const void *e2_,
+				  const struct hashmap_entry *eptr,
+				  const struct hashmap_entry *entry_or_key,
 				  const void *keydata)
 {
-	const struct refname_hash_entry *e1 = e1_;
-	const struct refname_hash_entry *e2 = e2_;
+	const struct refname_hash_entry *e1, *e2;
 
+	e1 = container_of(eptr, const struct refname_hash_entry, ent);
+	e2 = container_of(entry_or_key, const struct refname_hash_entry, ent);
 	return strcmp(e1->refname, keydata ? keydata : e2->refname);
 }
 
@@ -276,9 +290,9 @@ static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
 	size_t len = strlen(refname);
 
 	FLEX_ALLOC_MEM(ent, refname, refname, len);
-	hashmap_entry_init(ent, strhash(refname));
+	hashmap_entry_init(&ent->ent, strhash(refname));
 	oidcpy(&ent->oid, oid);
-	hashmap_add(map, ent);
+	hashmap_add(map, &ent->ent);
 	return ent;
 }
 
@@ -313,6 +327,7 @@ static void find_non_local_tags(const struct ref *refs,
 {
 	struct hashmap existing_refs;
 	struct hashmap remote_refs;
+	struct oidset fetch_oids = OIDSET_INIT;
 	struct string_list remote_refs_list = STRING_LIST_INIT_NODUP;
 	struct string_list_item *remote_ref_item;
 	const struct ref *ref;
@@ -320,6 +335,7 @@ static void find_non_local_tags(const struct ref *refs,
 
 	refname_hash_init(&existing_refs);
 	refname_hash_init(&remote_refs);
+	create_fetch_oidset(head, &fetch_oids);
 	for_each_ref(add_one_refname, &existing_refs);
 	for (ref = refs; ref; ref = ref->next) {
@@ -336,9 +352,9 @@ static void find_non_local_tags(const struct ref *refs,
 			if (item &&
 			    !has_object_file_with_flags(&ref->old_oid, OBJECT_INFO_QUICK) &&
-			    !will_fetch(head, ref->old_oid.hash) &&
+			    !oidset_contains(&fetch_oids, &ref->old_oid) &&
 			    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
-			    !will_fetch(head, item->oid.hash))
+			    !oidset_contains(&fetch_oids, &item->oid))
 				clear_item(item);
 			item = NULL;
 			continue;
@@ -352,7 +368,7 @@
 		 */
 		if (item &&
 		    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
-		    !will_fetch(head, item->oid.hash))
+		    !oidset_contains(&fetch_oids, &item->oid))
 			clear_item(item);
 
 		item = NULL;
@@ -365,7 +381,7 @@
 		item = refname_hash_add(&remote_refs, ref->name, &ref->old_oid);
 		string_list_insert(&remote_refs_list, ref->name);
 	}
-	hashmap_free(&existing_refs, 1);
+	hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
 
 	/*
 	 * We may have a final lightweight tag that needs to be
@@ -373,7 +389,7 @@
 	 */
 	if (item &&
 	    !has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
-	    !will_fetch(head, item->oid.hash))
+	    !oidset_contains(&fetch_oids, &item->oid))
 		clear_item(item);
 
 	/*
@@ -383,8 +399,10 @@ static void find_non_local_tags(const struct ref *refs,
 	for_each_string_list_item(remote_ref_item, &remote_refs_list) {
 		const char *refname = remote_ref_item->string;
 		struct ref *rm;
+		unsigned int hash = strhash(refname);
 
-		item = hashmap_get_from_hash(&remote_refs, strhash(refname), refname);
+		item = hashmap_get_entry_from_hash(&remote_refs, hash, refname,
+						   struct refname_hash_entry, ent);
 		if (!item)
 			BUG("unseen remote ref?");
@@ -398,8 +416,9 @@ static void find_non_local_tags(const struct ref *refs,
 		**tail = rm;
 		*tail = &rm->next;
 	}
-	hashmap_free(&remote_refs, 1);
+	hashmap_free_entries(&remote_refs, struct refname_hash_entry, ent);
 	string_list_clear(&remote_refs_list, 0);
+	oidset_clear(&fetch_oids);
 }
 
 static struct ref *get_ref_map(struct remote *remote,
@@ -516,17 +535,18 @@
 		if (rm->peer_ref) {
 			const char *refname = rm->peer_ref->name;
 			struct refname_hash_entry *peer_item;
+			unsigned int hash = strhash(refname);
 
-			peer_item = hashmap_get_from_hash(&existing_refs,
-							  strhash(refname),
-							  refname);
+			peer_item = hashmap_get_entry_from_hash(&existing_refs,
+						hash, refname,
+						struct refname_hash_entry, ent);
 			if (peer_item) {
 				struct object_id *old_oid = &peer_item->oid;
 				oidcpy(&rm->peer_ref->old_oid, old_oid);
 			}
 		}
 	}
-	hashmap_free(&existing_refs, 1);
+	hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
 
 	return ref_map;
 }
 
@@ -1065,8 +1085,11 @@ static int check_exist_and_connected(struct ref *ref_map)
 static int fetch_refs(struct transport *transport, struct ref *ref_map)
 {
 	int ret = check_exist_and_connected(ref_map);
-	if (ret)
+	if (ret) {
+		trace2_region_enter("fetch", "fetch_refs", the_repository);
 		ret = transport_fetch_refs(transport, ref_map);
+		trace2_region_leave("fetch", "fetch_refs", the_repository);
+	}
 	if (!ret)
 		/*
 		 * Keep the new pack's ".keep" file around to allow the caller
@@ -1082,11 +1105,14 @@ static int consume_refs(struct transport *transport, struct ref *ref_map)
 {
 	int connectivity_checked = transport->smart_options
 		? transport->smart_options->connectivity_checked : 0;
-	int ret = store_updated_refs(transport->url,
-				     transport->remote->name,
-				     connectivity_checked,
-				     ref_map);
+	int ret;
+	trace2_region_enter("fetch", "consume_refs", the_repository);
+	ret = store_updated_refs(transport->url,
+				 transport->remote->name,
+				 connectivity_checked,
+				 ref_map);
 	transport_unlock_pack(transport);
+	trace2_region_leave("fetch", "consume_refs", the_repository);
 	return ret;
 }
 
@@ -1238,13 +1264,10 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
 	if (update_shallow)
 		set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
 	if (filter_options.choice) {
-		struct strbuf expanded_filter_spec = STRBUF_INIT;
-		expand_list_objects_filter_spec(&filter_options,
-						&expanded_filter_spec);
-		set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
-			   expanded_filter_spec.buf);
+		const char *spec =
+			expand_list_objects_filter_spec(&filter_options);
+		set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, spec);
 		set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
-		strbuf_release(&expanded_filter_spec);
 	}
 	if (negotiation_tip.nr) {
 		if (transport->smart_options)
@@ -1334,9 +1357,11 @@ static int do_fetch(struct transport *transport,
 			argv_array_push(&ref_prefixes, "refs/tags/");
 	}
 
-	if (must_list_refs)
+	if (must_list_refs) {
+		trace2_region_enter("fetch", "remote_refs", the_repository);
 		remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
-	else
+		trace2_region_leave("fetch", "remote_refs", the_repository);
+	} else
 		remote_refs = NULL;
 
 	argv_array_clear(&ref_prefixes);
@@ -1367,6 +1392,51 @@ static int do_fetch(struct transport *transport,
 			retcode = 1;
 			goto cleanup;
 		}
+
+	if (set_upstream) {
+		struct branch *branch = branch_get("HEAD");
+		struct ref *rm;
+		struct ref *source_ref = NULL;
+
+		/*
+		 * We're setting the upstream configuration for the
+		 * current branch. The relevant upstream is the
+		 * fetched branch that is meant to be merged with the
+		 * current one, i.e. the one fetched to FETCH_HEAD.
+		 *
+		 * When there are several such branches, consider the
+		 * request ambiguous and err on the safe side by doing
+		 * nothing and just emit a warning.
+		 */
+		for (rm = ref_map; rm; rm = rm->next) {
+			if (!rm->peer_ref) {
+				if (source_ref) {
+					warning(_("multiple branch detected, incompatible with --set-upstream"));
+					goto skip;
+				} else {
+					source_ref = rm;
+				}
+			}
+		}
+		if (source_ref) {
+			if (!strcmp(source_ref->name, "HEAD") ||
+			    starts_with(source_ref->name, "refs/heads/"))
+				install_branch_config(0,
+						      branch->name,
+						      transport->remote->name,
+						      source_ref->name);
+			else if (starts_with(source_ref->name, "refs/remotes/"))
+				warning(_("not setting upstream for a remote remote-tracking branch"));
+			else if (starts_with(source_ref->name, "refs/tags/"))
+				warning(_("not setting upstream for a remote tag"));
+			else
+				warning(_("unknown branch type"));
+		} else {
+			warning(_("no source branch found.\n"
+				  "you need to specify exactly one branch with the --set-upstream option."));
+		}
+	}
+ skip:
 	free_refs(ref_map);
 
 	/* if neither --no-tags nor --tags was specified, do automated tag
@@ -1463,7 +1533,62 @@ static void add_options_to_argv(struct argv_array *argv)
 }
 
-static int fetch_multiple(struct string_list *list)
+/* Fetch multiple remotes in parallel */
+
+struct parallel_fetch_state {
+	const char **argv;
+	struct string_list *remotes;
+	int next, result;
+};
+
+static int fetch_next_remote(struct child_process *cp, struct strbuf *out,
+			     void *cb, void **task_cb)
+{
+	struct parallel_fetch_state *state = cb;
+	char *remote;
+
+	if (state->next < 0 || state->next >= state->remotes->nr)
+		return 0;
+
+	remote = state->remotes->items[state->next++].string;
+	*task_cb = remote;
+
+	argv_array_pushv(&cp->args, state->argv);
+	argv_array_push(&cp->args, remote);
+	cp->git_cmd = 1;
+
+	if (verbosity >= 0)
+		printf(_("Fetching %s\n"), remote);
+
+	return 1;
+}
+
+static int fetch_failed_to_start(struct strbuf *out, void *cb, void *task_cb)
+{
+	struct parallel_fetch_state *state = cb;
+	const char *remote = task_cb;
+
+	state->result = error(_("Could not fetch %s"), remote);
+
+	return 0;
+}
+
+static int fetch_finished(int result, struct strbuf *out,
+			  void *cb, void *task_cb)
+{
+	struct parallel_fetch_state *state = cb;
+	const char *remote = task_cb;
+
+	if (result) {
+		strbuf_addf(out, _("could not fetch '%s' (exit code: %d)\n"),
+			    remote, result);
+		state->result = -1;
+	}
+
+	return 0;
+}
+
+static int fetch_multiple(struct string_list *list, int max_children)
 {
 	int i, result = 0;
 	struct argv_array argv = ARGV_ARRAY_INIT;
@@ -1477,20 +1602,34 @@ static int fetch_multiple(struct string_list *list)
 	argv_array_pushl(&argv, "fetch", "--append", "--no-auto-gc", NULL);
 	add_options_to_argv(&argv);
 
-	for (i = 0; i < list->nr; i++) {
-		const char *name = list->items[i].string;
-		argv_array_push(&argv, name);
-		if (verbosity >= 0)
-			printf(_("Fetching %s\n"), name);
-		if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
-			error(_("Could not fetch %s"), name);
-			result = 1;
+	if (max_children != 1 && list->nr != 1) {
+		struct parallel_fetch_state state = { argv.argv, list, 0, 0 };
+
+		argv_array_push(&argv, "--end-of-options");
+		result = run_processes_parallel_tr2(max_children,
+						    &fetch_next_remote,
+						    &fetch_failed_to_start,
+						    &fetch_finished,
+						    &state,
+						    "fetch", "parallel/fetch");
+
+		if (!result)
+			result = state.result;
+	} else
+		for (i = 0; i < list->nr; i++) {
+			const char *name = list->items[i].string;
+			argv_array_push(&argv, name);
+			if (verbosity >= 0)
+				printf(_("Fetching %s\n"), name);
+			if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
+				error(_("Could not fetch %s"), name);
+				result = 1;
+			}
+			argv_array_pop(&argv);
 		}
-		argv_array_pop(&argv);
-	}
 
 	argv_array_clear(&argv);
-	return result;
+	return !!result;
 }
 
 /*
@@ -1510,37 +1649,27 @@ static inline void fetch_one_setup_partial(struct remote *remote)
 	 * If no prior partial clone/fetch and the current fetch DID NOT
 	 * request a partial-fetch, do a normal fetch.
 	 */
-	if (!repository_format_partial_clone && !filter_options.choice)
+	if (!has_promisor_remote() && !filter_options.choice)
 		return;
 
 	/*
-	 * If this is the FIRST partial-fetch request, we enable partial
-	 * on this repo and remember the given filter-spec as the default
-	 * for subsequent fetches to this remote.
+	 * If this is a partial-fetch request, we enable partial on
+	 * this repo if not already enabled and remember the given
+	 * filter-spec as the default for subsequent fetches to this
+	 * remote.
 	 */
-	if (!repository_format_partial_clone && filter_options.choice) {
+	if (filter_options.choice) {
 		partial_clone_register(remote->name, &filter_options);
 		return;
 	}
 
 	/*
-	 * We are currently limited to only ONE promisor remote and only
-	 * allow partial-fetches from the promisor remote.
-	 */
-	if (strcmp(remote->name, repository_format_partial_clone)) {
-		if (filter_options.choice)
-			die(_("--filter can only be used with the remote "
-			      "configured in extensions.partialClone"));
-		return;
-	}
-
-	/*
 	 * Do a partial-fetch from the promisor remote using either the
 	 * explicitly given filter-spec or inherit the filter-spec from
 	 * the config.
 	 */
 	if (!filter_options.choice)
-		partial_clone_get_default_filter_spec(&filter_options);
+		partial_clone_get_default_filter_spec(&filter_options, remote->name);
 	return;
 }
 
@@ -1626,14 +1755,13 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 
 	packet_trace_identity("fetch");
 
-	fetch_if_missing = 0;
-
 	/* Record the command line for the reflog */
 	strbuf_addstr(&default_rla, "fetch");
 	for (i = 1; i < argc; i++)
 		strbuf_addf(&default_rla, " %s", argv[i]);
 
-	fetch_config_from_gitmodules(&max_children, &recurse_submodules);
+	fetch_config_from_gitmodules(&submodule_fetch_jobs_config,
+				     &recurse_submodules);
 	git_config(git_fetch_config, NULL);
 
 	argc = parse_options(argc, argv, prefix,
@@ -1661,7 +1789,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 	if (depth || deepen_since || deepen_not.nr)
 		deepen = 1;
 
-	if (filter_options.choice && !repository_format_partial_clone)
+	if (filter_options.choice && !has_promisor_remote())
 		die("--filter can only be used when extensions.partialClone is set");
 
 	if (all) {
@@ -1694,20 +1822,34 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 		}
 	}
 
+	fetch_if_missing = 0;
+
 	if (remote) {
-		if (filter_options.choice || repository_format_partial_clone)
+		if (filter_options.choice || has_promisor_remote())
 			fetch_one_setup_partial(remote);
 		result = fetch_one(remote, argc, argv, prune_tags_ok);
 	} else {
+		int max_children = max_jobs;
+
 		if (filter_options.choice)
 			die(_("--filter can only be used with the remote "
			      "configured in extensions.partialclone"));
+
+		if (max_children < 0)
+			max_children = fetch_parallel_config;
+
 		/* TODO should this also die if we have a previous partial-clone? */
-		result = fetch_multiple(&list);
+		result = fetch_multiple(&list, max_children);
 	}
 
 	if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
 		struct argv_array options = ARGV_ARRAY_INIT;
+		int max_children = max_jobs;
+
+		if (max_children < 0)
+			max_children = submodule_fetch_jobs_config;
+		if (max_children < 0)
+			max_children = fetch_parallel_config;
 
 		add_options_to_argv(&options);
 		result = fetch_populated_submodules(the_repository,
@@ -1722,6 +1864,20 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 
 	string_list_clear(&list, 0);
 
+	prepare_repo_settings(the_repository);
+	if (the_repository->settings.fetch_write_commit_graph) {
+		int commit_graph_flags = COMMIT_GRAPH_WRITE_SPLIT;
+		struct split_commit_graph_opts split_opts;
+		memset(&split_opts, 0, sizeof(struct split_commit_graph_opts));
+
+		if (progress)
+			commit_graph_flags |= COMMIT_GRAPH_WRITE_PROGRESS;
+
+		write_commit_graph_reachable(get_object_directory(),
+					     commit_graph_flags,
+					     &split_opts);
+	}
+
 	close_object_store(the_repository->objects);
 
 	if (enable_auto_gc) {
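
Note: the largest functional addition in this diff is the parallel mode in fetch_multiple(), which hands one "git fetch <remote>" child per remote to git's run_processes_parallel machinery, capped by --jobs / fetch.parallel. The sketch below is illustrative only and is not part of the diff: it wires the same three callbacks (get_next_task, start_failure, task_finished) to a hypothetical NULL-terminated array of remote names. The names my_state, next_task, task_done and start_failed are invented for the example, and the git-internal headers of this era (cache.h, run-command.h, argv-array.h) are assumed to be available.

#include "cache.h"
#include "run-command.h"
#include "argv-array.h"

struct my_state {
	const char **remotes;	/* hypothetical NULL-terminated list of remote names */
	int next, result;
};

/* get_next_task: configure one child per remote; return 0 when no work is left */
static int next_task(struct child_process *cp, struct strbuf *out,
		     void *cb, void **task_cb)
{
	struct my_state *state = cb;
	const char *remote = state->remotes[state->next];

	if (!remote)
		return 0;
	state->next++;
	*task_cb = (void *)remote;	/* handed back to the result callbacks */

	cp->git_cmd = 1;		/* child runs "git fetch <remote>" */
	argv_array_pushl(&cp->args, "fetch", remote, NULL);
	return 1;
}

/* task_finished: remember that something failed, but keep draining the queue */
static int task_done(int result, struct strbuf *out, void *cb, void *task_cb)
{
	struct my_state *state = cb;

	if (result)
		state->result = error("could not fetch %s", (const char *)task_cb);
	return 0;
}

/* start_failure: a child that could not even start counts as a failed task */
static int start_failed(struct strbuf *out, void *cb, void *task_cb)
{
	return task_done(-1, out, cb, task_cb);
}

/*
 * Caller side, mirroring what fetch_multiple() does with max_children:
 *
 *	struct my_state state = { remotes, 0, 0 };
 *	run_processes_parallel_tr2(4, next_task, start_failed, task_done,
 *				   &state, "fetch", "parallel/fetch");
 */

The parallel runner buffers each child's output together with whatever the callbacks append to the out strbuf, and prints it per task rather than interleaving it; that is why fetch_finished() in the diff adds its error message to out instead of printing directly.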