Diffstat (limited to 'builtin/fetch.c')
 -rw-r--r--  builtin/fetch.c | 200
 1 file changed, 152 insertions(+), 48 deletions(-)
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 67c0eb88c6..863c858fde 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -7,6 +7,7 @@
#include "refs.h"
#include "refspec.h"
#include "object-store.h"
+#include "oidset.h"
#include "commit.h"
#include "builtin.h"
#include "string-list.h"
@@ -58,7 +59,8 @@ static int verbosity, deepen_relative, set_upstream;
static int progress = -1;
static int enable_auto_gc = 1;
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
-static int max_children = 1;
+static int max_jobs = -1, submodule_fetch_jobs_config = -1;
+static int fetch_parallel_config = 1;
static enum transport_family family;
static const char *depth;
static const char *deepen_since;
@@ -100,13 +102,20 @@ static int git_fetch_config(const char *k, const char *v, void *cb)
}
if (!strcmp(k, "submodule.fetchjobs")) {
- max_children = parse_submodule_fetchjobs(k, v);
+ submodule_fetch_jobs_config = parse_submodule_fetchjobs(k, v);
return 0;
} else if (!strcmp(k, "fetch.recursesubmodules")) {
recurse_submodules = parse_fetch_recurse_submodules_arg(k, v);
return 0;
}
+ if (!strcmp(k, "fetch.parallel")) {
+ fetch_parallel_config = git_config_int(k, v);
+ if (fetch_parallel_config < 0)
+ die(_("fetch.parallel cannot be negative"));
+ return 0;
+ }
+
return git_default_config(k, v, cb);
}
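
Note: fetch.parallel is only recorded here; the hunks near the end of the diff resolve the effective job count in cmd_fetch(), where an explicit --jobs/-j wins, then the type-specific configuration, then fetch.parallel. A minimal sketch of that precedence (resolve_fetch_jobs() is a hypothetical helper, not part of the patch):

static int resolve_fetch_jobs(int cli_jobs, int type_specific, int parallel_cfg)
{
	if (cli_jobs >= 0)
		return cli_jobs;       /* --jobs / -j given on the command line */
	if (type_specific >= 0)
		return type_specific;  /* e.g. submodule.fetchJobs for submodules */
	return parallel_cfg;           /* fetch.parallel, which defaults to 1 */
}
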
@@ -140,7 +149,7 @@ static struct option builtin_fetch_options[] = {
N_("fetch all tags and associated objects"), TAGS_SET),
OPT_SET_INT('n', NULL, &tags,
N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
- OPT_INTEGER('j', "jobs", &max_children,
+ OPT_INTEGER('j', "jobs", &max_jobs,
N_("number of submodules fetched in parallel")),
OPT_BOOL('p', "prune", &prune,
N_("prune remote-tracking branches no longer on remote")),
@@ -245,32 +254,31 @@ static void add_merge_config(struct ref **head,
}
}
-static int will_fetch(struct ref **head, const unsigned char *sha1)
+static void create_fetch_oidset(struct ref **head, struct oidset *out)
{
struct ref *rm = *head;
while (rm) {
- if (hasheq(rm->old_oid.hash, sha1))
- return 1;
+ oidset_insert(out, &rm->old_oid);
rm = rm->next;
}
- return 0;
}
struct refname_hash_entry {
- struct hashmap_entry ent; /* must be the first member */
+ struct hashmap_entry ent;
struct object_id oid;
int ignore;
char refname[FLEX_ARRAY];
};
static int refname_hash_entry_cmp(const void *hashmap_cmp_fn_data,
- const void *e1_,
- const void *e2_,
+ const struct hashmap_entry *eptr,
+ const struct hashmap_entry *entry_or_key,
const void *keydata)
{
- const struct refname_hash_entry *e1 = e1_;
- const struct refname_hash_entry *e2 = e2_;
+ const struct refname_hash_entry *e1, *e2;
+ e1 = container_of(eptr, const struct refname_hash_entry, ent);
+ e2 = container_of(entry_or_key, const struct refname_hash_entry, ent);
return strcmp(e1->refname, keydata ? keydata : e2->refname);
}
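
Note: replacing will_fetch() with an oidset turns an O(refs) list walk per candidate tag into a one-time set construction plus cheap membership tests. A minimal sketch of the pattern as find_non_local_tags() uses it below, assuming git's internal oidset.h API (head and ref stand for that function's variables):

struct oidset fetch_oids = OIDSET_INIT;
const struct ref *rm;

for (rm = *head; rm; rm = rm->next)               /* build once */
	oidset_insert(&fetch_oids, &rm->old_oid);

if (!oidset_contains(&fetch_oids, &ref->old_oid)) /* O(1) membership test */
	; /* the tag's object is not already queued; decide whether to request it */

oidset_clear(&fetch_oids);                        /* free the set when done */
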
@@ -282,9 +290,9 @@ static struct refname_hash_entry *refname_hash_add(struct hashmap *map,
size_t len = strlen(refname);
FLEX_ALLOC_MEM(ent, refname, refname, len);
- hashmap_entry_init(ent, strhash(refname));
+ hashmap_entry_init(&ent->ent, strhash(refname));
oidcpy(&ent->oid, oid);
- hashmap_add(map, ent);
+ hashmap_add(map, &ent->ent);
return ent;
}
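
Note: this and the other hashmap hunks follow an API change in which the map traffics only in the embedded struct hashmap_entry, so the entry no longer has to be the first member of the containing struct (which is why the "must be the first member" comment disappears above) and callers get back to their own type via container_of() or the typed helpers. A sketch of the lookup and teardown side, using only calls that appear elsewhere in this diff plus oid_to_hex():

struct refname_hash_entry *e;
unsigned int hash = strhash(refname);

/* typed lookup: the helper applies container_of() to the stored entry */
e = hashmap_get_entry_from_hash(&existing_refs, hash, refname,
				struct refname_hash_entry, ent);
if (e)
	printf("%s is at %s\n", e->refname, oid_to_hex(&e->oid));

/* freeing now also needs the containing type to locate each allocation */
hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
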
@@ -319,6 +327,7 @@ static void find_non_local_tags(const struct ref *refs,
{
struct hashmap existing_refs;
struct hashmap remote_refs;
+ struct oidset fetch_oids = OIDSET_INIT;
struct string_list remote_refs_list = STRING_LIST_INIT_NODUP;
struct string_list_item *remote_ref_item;
const struct ref *ref;
@@ -326,6 +335,7 @@ static void find_non_local_tags(const struct ref *refs,
refname_hash_init(&existing_refs);
refname_hash_init(&remote_refs);
+ create_fetch_oidset(head, &fetch_oids);
for_each_ref(add_one_refname, &existing_refs);
for (ref = refs; ref; ref = ref->next) {
@@ -342,9 +352,9 @@ static void find_non_local_tags(const struct ref *refs,
if (item &&
!has_object_file_with_flags(&ref->old_oid,
OBJECT_INFO_QUICK) &&
- !will_fetch(head, ref->old_oid.hash) &&
+ !oidset_contains(&fetch_oids, &ref->old_oid) &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
- !will_fetch(head, item->oid.hash))
+ !oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
item = NULL;
continue;
@@ -358,7 +368,7 @@ static void find_non_local_tags(const struct ref *refs,
*/
if (item &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
- !will_fetch(head, item->oid.hash))
+ !oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
item = NULL;
@@ -371,7 +381,7 @@ static void find_non_local_tags(const struct ref *refs,
item = refname_hash_add(&remote_refs, ref->name, &ref->old_oid);
string_list_insert(&remote_refs_list, ref->name);
}
- hashmap_free(&existing_refs, 1);
+ hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
/*
* We may have a final lightweight tag that needs to be
@@ -379,7 +389,7 @@ static void find_non_local_tags(const struct ref *refs,
*/
if (item &&
!has_object_file_with_flags(&item->oid, OBJECT_INFO_QUICK) &&
- !will_fetch(head, item->oid.hash))
+ !oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
/*
@@ -389,8 +399,10 @@ static void find_non_local_tags(const struct ref *refs,
for_each_string_list_item(remote_ref_item, &remote_refs_list) {
const char *refname = remote_ref_item->string;
struct ref *rm;
+ unsigned int hash = strhash(refname);
- item = hashmap_get_from_hash(&remote_refs, strhash(refname), refname);
+ item = hashmap_get_entry_from_hash(&remote_refs, hash, refname,
+ struct refname_hash_entry, ent);
if (!item)
BUG("unseen remote ref?");
@@ -404,8 +416,9 @@ static void find_non_local_tags(const struct ref *refs,
**tail = rm;
*tail = &rm->next;
}
- hashmap_free(&remote_refs, 1);
+ hashmap_free_entries(&remote_refs, struct refname_hash_entry, ent);
string_list_clear(&remote_refs_list, 0);
+ oidset_clear(&fetch_oids);
}
static struct ref *get_ref_map(struct remote *remote,
@@ -522,17 +535,18 @@ static struct ref *get_ref_map(struct remote *remote,
if (rm->peer_ref) {
const char *refname = rm->peer_ref->name;
struct refname_hash_entry *peer_item;
+ unsigned int hash = strhash(refname);
- peer_item = hashmap_get_from_hash(&existing_refs,
- strhash(refname),
- refname);
+ peer_item = hashmap_get_entry_from_hash(&existing_refs,
+ hash, refname,
+ struct refname_hash_entry, ent);
if (peer_item) {
struct object_id *old_oid = &peer_item->oid;
oidcpy(&rm->peer_ref->old_oid, old_oid);
}
}
}
- hashmap_free(&existing_refs, 1);
+ hashmap_free_entries(&existing_refs, struct refname_hash_entry, ent);
return ref_map;
}
@@ -1071,8 +1085,11 @@ static int check_exist_and_connected(struct ref *ref_map)
static int fetch_refs(struct transport *transport, struct ref *ref_map)
{
int ret = check_exist_and_connected(ref_map);
- if (ret)
+ if (ret) {
+ trace2_region_enter("fetch", "fetch_refs", the_repository);
ret = transport_fetch_refs(transport, ref_map);
+ trace2_region_leave("fetch", "fetch_refs", the_repository);
+ }
if (!ret)
/*
* Keep the new pack's ".keep" file around to allow the caller
@@ -1088,11 +1105,14 @@ static int consume_refs(struct transport *transport, struct ref *ref_map)
{
int connectivity_checked = transport->smart_options
? transport->smart_options->connectivity_checked : 0;
- int ret = store_updated_refs(transport->url,
- transport->remote->name,
- connectivity_checked,
- ref_map);
+ int ret;
+ trace2_region_enter("fetch", "consume_refs", the_repository);
+ ret = store_updated_refs(transport->url,
+ transport->remote->name,
+ connectivity_checked,
+ ref_map);
transport_unlock_pack(transport);
+ trace2_region_leave("fetch", "consume_refs", the_repository);
return ret;
}
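
Note: the trace2_region_enter()/trace2_region_leave() pairs added in this and the neighbouring hunks bracket the expensive phases of a fetch (listing remote refs, transferring objects, storing the updated refs) so their duration shows up in trace2 output, e.g. when GIT_TRACE2_PERF is enabled. Enter and leave must use the same category/label and nest properly. The pattern in isolation (do_expensive_step() is a hypothetical stand-in for the timed work):

trace2_region_enter("fetch", "example_step", the_repository);
do_expensive_step();    /* hypothetical: whatever work is being measured */
trace2_region_leave("fetch", "example_step", the_repository);
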
@@ -1337,9 +1357,11 @@ static int do_fetch(struct transport *transport,
argv_array_push(&ref_prefixes, "refs/tags/");
}
- if (must_list_refs)
+ if (must_list_refs) {
+ trace2_region_enter("fetch", "remote_refs", the_repository);
remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
- else
+ trace2_region_leave("fetch", "remote_refs", the_repository);
+ } else
remote_refs = NULL;
argv_array_clear(&ref_prefixes);
@@ -1511,7 +1533,62 @@ static void add_options_to_argv(struct argv_array *argv)
}
-static int fetch_multiple(struct string_list *list)
+/* Fetch multiple remotes in parallel */
+
+struct parallel_fetch_state {
+ const char **argv;
+ struct string_list *remotes;
+ int next, result;
+};
+
+static int fetch_next_remote(struct child_process *cp, struct strbuf *out,
+ void *cb, void **task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ char *remote;
+
+ if (state->next < 0 || state->next >= state->remotes->nr)
+ return 0;
+
+ remote = state->remotes->items[state->next++].string;
+ *task_cb = remote;
+
+ argv_array_pushv(&cp->args, state->argv);
+ argv_array_push(&cp->args, remote);
+ cp->git_cmd = 1;
+
+ if (verbosity >= 0)
+ printf(_("Fetching %s\n"), remote);
+
+ return 1;
+}
+
+static int fetch_failed_to_start(struct strbuf *out, void *cb, void *task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ const char *remote = task_cb;
+
+ state->result = error(_("Could not fetch %s"), remote);
+
+ return 0;
+}
+
+static int fetch_finished(int result, struct strbuf *out,
+ void *cb, void *task_cb)
+{
+ struct parallel_fetch_state *state = cb;
+ const char *remote = task_cb;
+
+ if (result) {
+ strbuf_addf(out, _("could not fetch '%s' (exit code: %d)\n"),
+ remote, result);
+ state->result = -1;
+ }
+
+ return 0;
+}
+
+static int fetch_multiple(struct string_list *list, int max_children)
{
int i, result = 0;
struct argv_array argv = ARGV_ARRAY_INIT;
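
Note: the three callbacks above are the hooks expected by git's run-command.h parallel-processes API: cb carries the shared parallel_fetch_state, and task_cb carries the remote name assigned to one child. A sketch of how they are wired together, mirroring the fetch_multiple() hunk below (git's internal API; behaviour described in the comments is that of run_processes_parallel):

/*
 * Up to max_children "git fetch <remote>" children run at once; each
 * child's output is buffered and printed as a block so progress from
 * different remotes does not interleave, and the trailing strings give
 * the trace2 category/label for the region around the whole run.
 */
struct parallel_fetch_state state = { argv.argv, list, 0, 0 };

result = run_processes_parallel_tr2(max_children,
				    &fetch_next_remote,     /* returns 1 while remotes remain, 0 when done */
				    &fetch_failed_to_start, /* a child could not be spawned */
				    &fetch_finished,        /* a child exited; non-zero status is recorded */
				    &state,
				    "fetch", "parallel/fetch");
if (!result)
	result = state.result;  /* surface failures collected by the callbacks */
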
@@ -1525,20 +1602,34 @@ static int fetch_multiple(struct string_list *list)
argv_array_pushl(&argv, "fetch", "--append", "--no-auto-gc", NULL);
add_options_to_argv(&argv);
- for (i = 0; i < list->nr; i++) {
- const char *name = list->items[i].string;
- argv_array_push(&argv, name);
- if (verbosity >= 0)
- printf(_("Fetching %s\n"), name);
- if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
- error(_("Could not fetch %s"), name);
- result = 1;
+ if (max_children != 1 && list->nr != 1) {
+ struct parallel_fetch_state state = { argv.argv, list, 0, 0 };
+
+ argv_array_push(&argv, "--end-of-options");
+ result = run_processes_parallel_tr2(max_children,
+ &fetch_next_remote,
+ &fetch_failed_to_start,
+ &fetch_finished,
+ &state,
+ "fetch", "parallel/fetch");
+
+ if (!result)
+ result = state.result;
+ } else
+ for (i = 0; i < list->nr; i++) {
+ const char *name = list->items[i].string;
+ argv_array_push(&argv, name);
+ if (verbosity >= 0)
+ printf(_("Fetching %s\n"), name);
+ if (run_command_v_opt(argv.argv, RUN_GIT_CMD)) {
+ error(_("Could not fetch %s"), name);
+ result = 1;
+ }
+ argv_array_pop(&argv);
}
- argv_array_pop(&argv);
- }
argv_array_clear(&argv);
- return result;
+ return !!result;
}
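
Note: two details of the parallel branch above are easy to miss: --end-of-options is pushed before the remote names so that a remote whose name happens to start with a dash is not parsed as an option by the child git fetch, and return !!result normalizes the accumulated status (which may be -1 from the callbacks) to the conventional 0/1 exit code. A hypothetical illustration of the former, with a made-up remote name:

/*
 * Hypothetical child command line: everything after --end-of-options is
 * treated as an operand, so even an odd remote name such as "-origin"
 * cannot be mistaken for a flag.
 */
argv_array_pushl(&cp->args, "fetch", "--append", "--no-auto-gc",
		 "--end-of-options", "-origin", NULL);
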
/*
@@ -1664,14 +1755,13 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
packet_trace_identity("fetch");
- fetch_if_missing = 0;
-
/* Record the command line for the reflog */
strbuf_addstr(&default_rla, "fetch");
for (i = 1; i < argc; i++)
strbuf_addf(&default_rla, " %s", argv[i]);
- fetch_config_from_gitmodules(&max_children, &recurse_submodules);
+ fetch_config_from_gitmodules(&submodule_fetch_jobs_config,
+ &recurse_submodules);
git_config(git_fetch_config, NULL);
argc = parse_options(argc, argv, prefix,
@@ -1732,20 +1822,34 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
}
}
+ fetch_if_missing = 0;
+
if (remote) {
if (filter_options.choice || has_promisor_remote())
fetch_one_setup_partial(remote);
result = fetch_one(remote, argc, argv, prune_tags_ok);
} else {
+ int max_children = max_jobs;
+
if (filter_options.choice)
die(_("--filter can only be used with the remote "
"configured in extensions.partialclone"));
+
+ if (max_children < 0)
+ max_children = fetch_parallel_config;
+
/* TODO should this also die if we have a previous partial-clone? */
- result = fetch_multiple(&list);
+ result = fetch_multiple(&list, max_children);
}
if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
struct argv_array options = ARGV_ARRAY_INIT;
+ int max_children = max_jobs;
+
+ if (max_children < 0)
+ max_children = submodule_fetch_jobs_config;
+ if (max_children < 0)
+ max_children = fetch_parallel_config;
add_options_to_argv(&options);
result = fetch_populated_submodules(the_repository,