Diffstat (limited to 'unpack-trees.c')
-rw-r--r-- | unpack-trees.c | 668
1 file changed, 474 insertions, 194 deletions
diff --git a/unpack-trees.c b/unpack-trees.c index 88a0b5d250..1ecdab3304 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -1,5 +1,5 @@ -#define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" +#include "argv-array.h" #include "repository.h" #include "config.h" #include "dir.h" @@ -11,11 +11,11 @@ #include "refs.h" #include "attr.h" #include "split-index.h" -#include "dir.h" #include "submodule.h" #include "submodule-config.h" #include "fsmonitor.h" -#include "fetch-object.h" +#include "object-store.h" +#include "promisor-remote.h" /* * Error messages expected by scripts out of plumbing commands such as @@ -103,6 +103,8 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, const char **msgs = opts->msgs; const char *msg; + argv_array_init(&opts->msgs_to_free); + if (!strcmp(cmd, "checkout")) msg = advice_commit_before_merge ? _("Your local changes to the following files would be overwritten by checkout:\n%%s" @@ -119,7 +121,7 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, "Please commit your changes or stash them before you %s.") : _("Your local changes to the following files would be overwritten by %s:\n%%s"); msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] = - xstrfmt(msg, cmd, cmd); + argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd); msgs[ERROR_NOT_UPTODATE_DIR] = _("Updating the following directories would lose untracked files in them:\n%s"); @@ -139,7 +141,8 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, ? _("The following untracked working tree files would be removed by %s:\n%%s" "Please move or remove them before you %s.") : _("The following untracked working tree files would be removed by %s:\n%%s"); - msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd); + msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = + argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd); if (!strcmp(cmd, "checkout")) msg = advice_commit_before_merge @@ -156,7 +159,8 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, ? 
_("The following untracked working tree files would be overwritten by %s:\n%%s" "Please move or remove them before you %s.") : _("The following untracked working tree files would be overwritten by %s:\n%%s"); - msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd); + msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = + argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd); /* * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we @@ -179,6 +183,12 @@ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, opts->unpack_rejects[i].strdup_strings = 1; } +void clear_unpack_trees_porcelain(struct unpack_trees_options *opts) +{ + argv_array_clear(&opts->msgs_to_free); + memset(opts->msgs, 0, sizeof(opts->msgs)); +} + static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce, unsigned int set, unsigned int clear) { @@ -192,20 +202,11 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce, ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); } -static struct cache_entry *dup_entry(const struct cache_entry *ce) -{ - unsigned int size = ce_size(ce); - struct cache_entry *new_entry = xmalloc(size); - - memcpy(new_entry, ce, size); - return new_entry; -} - static void add_entry(struct unpack_trees_options *o, const struct cache_entry *ce, unsigned int set, unsigned int clear) { - do_add_entry(o, dup_entry(ce), set, clear); + do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear); } /* @@ -217,6 +218,9 @@ static int add_rejected_path(struct unpack_trees_options *o, enum unpack_trees_error_types e, const char *path) { + if (o->quiet) + return -1; + if (!o->show_all_errors) return error(ERRORMSG(o, e), super_prefixed(path)); @@ -266,15 +270,14 @@ static int check_submodule_move_head(const struct cache_entry *ce, flags |= SUBMODULE_MOVE_HEAD_FORCE; if (submodule_move_head(ce->name, old_id, new_id, flags)) - return o->gently ? -1 : - add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name); + return add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name); return 0; } /* - * Preform the loading of the repository's gitmodules file. This function is + * Perform the loading of the repository's gitmodules file. This function is * used by 'check_update()' to perform loading of the gitmodules file in two - * differnt situations: + * different situations: * (1) before removing entries from the working tree if the gitmodules file has * been marked for removal. This situation is specified by 'state' == NULL. * (2) before checking out entries to the working tree if the gitmodules file @@ -288,34 +291,15 @@ static void load_gitmodules_file(struct index_state *index, if (pos >= 0) { struct cache_entry *ce = index->cache[pos]; if (!state && ce->ce_flags & CE_WT_REMOVE) { - repo_read_gitmodules(the_repository); + repo_read_gitmodules(the_repository, 0); } else if (state && (ce->ce_flags & CE_UPDATE)) { - submodule_free(); - checkout_entry(ce, state, NULL); - repo_read_gitmodules(the_repository); + submodule_free(the_repository); + checkout_entry(ce, state, NULL, NULL); + repo_read_gitmodules(the_repository, 0); } } } -/* - * Unlink the last component and schedule the leading directories for - * removal, such that empty directories get removed. - */ -static void unlink_entry(const struct cache_entry *ce) -{ - const struct submodule *sub = submodule_from_ce(ce); - if (sub) { - /* state.force is set at the caller. 
*/ - submodule_move_head(ce->name, "HEAD", NULL, - SUBMODULE_MOVE_HEAD_FORCE); - } - if (!check_leading_path(ce->name, ce_namelen(ce))) - return; - if (remove_or_warn(ce->ce_mode, ce->name)) - return; - schedule_dir_for_removal(ce->name, ce_namelen(ce)); -} - static struct progress *get_progress(struct unpack_trees_options *o) { unsigned cnt = 0, total = 0; @@ -330,29 +314,78 @@ static struct progress *get_progress(struct unpack_trees_options *o) total++; } - return start_delayed_progress(_("Checking out files"), total); + return start_delayed_progress(_("Updating files"), total); +} + +static void setup_collided_checkout_detection(struct checkout *state, + struct index_state *index) +{ + int i; + + state->clone = 1; + for (i = 0; i < index->cache_nr; i++) + index->cache[i]->ce_flags &= ~CE_MATCHED; +} + +static void report_collided_checkout(struct index_state *index) +{ + struct string_list list = STRING_LIST_INIT_NODUP; + int i; + + for (i = 0; i < index->cache_nr; i++) { + struct cache_entry *ce = index->cache[i]; + + if (!(ce->ce_flags & CE_MATCHED)) + continue; + + string_list_append(&list, ce->name); + ce->ce_flags &= ~CE_MATCHED; + } + + list.cmp = fspathcmp; + string_list_sort(&list); + + if (list.nr) { + warning(_("the following paths have collided (e.g. case-sensitive paths\n" + "on a case-insensitive filesystem) and only one from the same\n" + "colliding group is in the working tree:\n")); + + for (i = 0; i < list.nr; i++) + fprintf(stderr, " '%s'\n", list.items[i].string); + } + + string_list_clear(&list, 0); } static int check_updates(struct unpack_trees_options *o) { unsigned cnt = 0; int errs = 0; - struct progress *progress = NULL; + struct progress *progress; struct index_state *index = &o->result; struct checkout state = CHECKOUT_INIT; int i; + trace_performance_enter(); state.force = 1; state.quiet = 1; state.refresh_cache = 1; state.istate = index; + if (!o->update || o->dry_run) { + remove_marked_cache_entries(index, 0); + trace_performance_leave("check_updates"); + return 0; + } + + if (o->clone) + setup_collided_checkout_detection(&state, index); + progress = get_progress(o); - if (o->update) - git_attr_set_direction(GIT_ATTR_CHECKOUT, index); + git_attr_set_direction(GIT_ATTR_CHECKOUT); - if (should_update_submodules() && o->update && !o->dry_run) + if (should_update_submodules()) load_gitmodules_file(index, NULL); for (i = 0; i < index->cache_nr; i++) { @@ -360,37 +393,38 @@ static int check_updates(struct unpack_trees_options *o) if (ce->ce_flags & CE_WT_REMOVE) { display_progress(progress, ++cnt); - if (o->update && !o->dry_run) - unlink_entry(ce); + unlink_entry(ce); } } - remove_marked_cache_entries(index); + + remove_marked_cache_entries(index, 0); remove_scheduled_dirs(); - if (should_update_submodules() && o->update && !o->dry_run) + if (should_update_submodules()) load_gitmodules_file(index, &state); enable_delayed_checkout(&state); - if (repository_format_partial_clone && o->update && !o->dry_run) { + if (has_promisor_remote()) { /* * Prefetch the objects that are to be checked out in the loop * below. 
*/ struct oid_array to_fetch = OID_ARRAY_INIT; - int fetch_if_missing_store = fetch_if_missing; - fetch_if_missing = 0; for (i = 0; i < index->cache_nr; i++) { struct cache_entry *ce = index->cache[i]; - if ((ce->ce_flags & CE_UPDATE) && - !S_ISGITLINK(ce->ce_mode)) { - if (!has_object_file(&ce->oid)) - oid_array_append(&to_fetch, &ce->oid); - } + + if (!(ce->ce_flags & CE_UPDATE) || + S_ISGITLINK(ce->ce_mode)) + continue; + if (!oid_object_info_extended(the_repository, &ce->oid, + NULL, + OBJECT_INFO_FOR_PREFETCH)) + continue; + oid_array_append(&to_fetch, &ce->oid); } if (to_fetch.nr) - fetch_objects(repository_format_partial_clone, - &to_fetch); - fetch_if_missing = fetch_if_missing_store; + promisor_remote_get_direct(the_repository, + to_fetch.oid, to_fetch.nr); oid_array_clear(&to_fetch); } for (i = 0; i < index->cache_nr; i++) { @@ -398,19 +432,21 @@ static int check_updates(struct unpack_trees_options *o) if (ce->ce_flags & CE_UPDATE) { if (ce->ce_flags & CE_WT_REMOVE) - die("BUG: both update and delete flags are set on %s", + BUG("both update and delete flags are set on %s", ce->name); display_progress(progress, ++cnt); ce->ce_flags &= ~CE_UPDATE; - if (o->update && !o->dry_run) { - errs |= checkout_entry(ce, &state, NULL); - } + errs |= checkout_entry(ce, &state, NULL, NULL); } } stop_progress(&progress); - errs |= finish_delayed_checkout(&state); - if (o->update) - git_attr_set_direction(GIT_ATTR_CHECKIN, NULL); + errs |= finish_delayed_checkout(&state, NULL); + git_attr_set_direction(GIT_ATTR_CHECKIN); + + if (o->clone) + report_collided_checkout(index); + + trace_performance_leave("check_updates"); return errs != 0; } @@ -597,7 +633,7 @@ static int unpack_index_entry(struct cache_entry *ce, return ret; } -static int find_cache_pos(struct traverse_info *, const struct name_entry *); +static int find_cache_pos(struct traverse_info *, const char *p, size_t len); static void restore_cache_bottom(struct traverse_info *info, int bottom) { @@ -616,7 +652,7 @@ static int switch_cache_bottom(struct traverse_info *info) if (o->diff_index_cached) return 0; ret = o->cache_bottom; - pos = find_cache_pos(info->prev, &info->name); + pos = find_cache_pos(info->prev, info->name, info->namelen); if (pos < -1) o->cache_bottom = -2 - pos; @@ -627,7 +663,113 @@ static int switch_cache_bottom(struct traverse_info *info) static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k) { - return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid); + return !is_null_oid(&name_j->oid) && !is_null_oid(&name_k->oid) && oideq(&name_j->oid, &name_k->oid); +} + +static int all_trees_same_as_cache_tree(int n, unsigned long dirmask, + struct name_entry *names, + struct traverse_info *info) +{ + struct unpack_trees_options *o = info->data; + int i; + + if (!o->merge || dirmask != ((1 << n) - 1)) + return 0; + + for (i = 1; i < n; i++) + if (!are_same_oid(names, names + i)) + return 0; + + return cache_tree_matches_traversal(o->src_index->cache_tree, names, info); +} + +static int index_pos_by_traverse_info(struct name_entry *names, + struct traverse_info *info) +{ + struct unpack_trees_options *o = info->data; + struct strbuf name = STRBUF_INIT; + int pos; + + strbuf_make_traverse_path(&name, info, names->path, names->pathlen); + strbuf_addch(&name, '/'); + pos = index_name_pos(o->src_index, name.buf, name.len); + if (pos >= 0) + BUG("This is a directory and should not exist in index"); + pos = -pos - 1; + if (pos >= o->src_index->cache_nr || + 
!starts_with(o->src_index->cache[pos]->name, name.buf) || + (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name.buf))) + BUG("pos %d doesn't point to the first entry of %s in index", + pos, name.buf); + strbuf_release(&name); + return pos; +} + +/* + * Fast path if we detect that all trees are the same as cache-tree at this + * path. We'll walk these trees in an iterative loop using cache-tree/index + * instead of ODB since we already know what these trees contain. + */ +static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names, + struct traverse_info *info) +{ + struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; + struct unpack_trees_options *o = info->data; + struct cache_entry *tree_ce = NULL; + int ce_len = 0; + int i, d; + + if (!o->merge) + BUG("We need cache-tree to do this optimization"); + + /* + * Do what unpack_callback() and unpack_nondirectories() normally + * do. But we walk all paths in an iterative loop instead. + * + * D/F conflicts and higher stage entries are not a concern + * because cache-tree would be invalidated and we would never + * get here in the first place. + */ + for (i = 0; i < nr_entries; i++) { + int new_ce_len, len, rc; + + src[0] = o->src_index->cache[pos + i]; + + len = ce_namelen(src[0]); + new_ce_len = cache_entry_size(len); + + if (new_ce_len > ce_len) { + new_ce_len <<= 1; + tree_ce = xrealloc(tree_ce, new_ce_len); + memset(tree_ce, 0, new_ce_len); + ce_len = new_ce_len; + + tree_ce->ce_flags = create_ce_flags(0); + + for (d = 1; d <= nr_names; d++) + src[d] = tree_ce; + } + + tree_ce->ce_mode = src[0]->ce_mode; + tree_ce->ce_namelen = len; + oidcpy(&tree_ce->oid, &src[0]->oid); + memcpy(tree_ce->name, src[0]->name, len + 1); + + rc = call_unpack_fn((const struct cache_entry * const *)src, o); + if (rc < 0) { + free(tree_ce); + return rc; + } + + mark_ce_used(src[0], o); + } + free(tree_ce); + if (o->debug_unpack) + printf("Unpacked %d entries from %s to %s using cache-tree\n", + nr_entries, + o->src_index->cache[pos]->name, + o->src_index->cache[pos + nr_entries - 1]->name); + return 0; } static int traverse_trees_recursive(int n, unsigned long dirmask, @@ -635,12 +777,33 @@ static int traverse_trees_recursive(int n, unsigned long dirmask, struct name_entry *names, struct traverse_info *info) { + struct unpack_trees_options *o = info->data; int i, ret, bottom; int nr_buf = 0; struct tree_desc t[MAX_UNPACK_TREES]; void *buf[MAX_UNPACK_TREES]; struct traverse_info newinfo; struct name_entry *p; + int nr_entries; + + nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info); + if (nr_entries > 0) { + int pos = index_pos_by_traverse_info(names, info); + + if (!o->merge || df_conflicts) + BUG("Wrong condition to get here buddy"); + + /* + * All entries up to 'pos' must have been processed + * (i.e. marked CE_UNPACKED) at this point. But to be safe, + * save and restore cache_bottom anyway to not miss + * unprocessed entries before 'pos'. 
+ */ + bottom = o->cache_bottom; + ret = traverse_by_cache_tree(pos, nr_entries, n, info); + o->cache_bottom = bottom; + return ret; + } p = names; while (!p->mode) @@ -649,8 +812,10 @@ static int traverse_trees_recursive(int n, unsigned long dirmask, newinfo = *info; newinfo.prev = info; newinfo.pathspec = info->pathspec; - newinfo.name = *p; - newinfo.pathlen += tree_entry_len(p) + 1; + newinfo.name = p->path; + newinfo.namelen = p->pathlen; + newinfo.mode = p->mode; + newinfo.pathlen = st_add3(newinfo.pathlen, tree_entry_len(p), 1); newinfo.df_conflicts |= df_conflicts; /* @@ -677,13 +842,13 @@ static int traverse_trees_recursive(int n, unsigned long dirmask, else { const struct object_id *oid = NULL; if (dirmask & 1) - oid = names[i].oid; - buf[nr_buf++] = fill_tree_descriptor(t + i, oid); + oid = &names[i].oid; + buf[nr_buf++] = fill_tree_descriptor(the_repository, t + i, oid); } } bottom = switch_cache_bottom(&newinfo); - ret = traverse_trees(n, t, &newinfo); + ret = traverse_trees(o->src_index, n, t, &newinfo); restore_cache_bottom(&newinfo, bottom); for (i = 0; i < nr_buf; i++) @@ -701,14 +866,18 @@ static int traverse_trees_recursive(int n, unsigned long dirmask, * itself - the caller needs to do the final check for the cache * entry having more data at the end! */ -static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n) +static int do_compare_entry_piecewise(const struct cache_entry *ce, + const struct traverse_info *info, + const char *name, size_t namelen, + unsigned mode) { - int len, pathlen, ce_len; + int pathlen, ce_len; const char *ce_name; if (info->prev) { int cmp = do_compare_entry_piecewise(ce, info->prev, - &info->name); + info->name, info->namelen, + info->mode); if (cmp) return cmp; } @@ -722,15 +891,15 @@ static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct ce_len -= pathlen; ce_name = ce->name + pathlen; - len = tree_entry_len(n); - return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); + return df_name_compare(ce_name, ce_len, S_IFREG, name, namelen, mode); } static int do_compare_entry(const struct cache_entry *ce, const struct traverse_info *info, - const struct name_entry *n) + const char *name, size_t namelen, + unsigned mode) { - int len, pathlen, ce_len; + int pathlen, ce_len; const char *ce_name; int cmp; @@ -740,7 +909,7 @@ static int do_compare_entry(const struct cache_entry *ce, * it is quicker to use the precomputed version. */ if (!info->traverse_path) - return do_compare_entry_piecewise(ce, info, n); + return do_compare_entry_piecewise(ce, info, name, namelen, mode); cmp = strncmp(ce->name, info->traverse_path, info->pathlen); if (cmp) @@ -755,13 +924,12 @@ static int do_compare_entry(const struct cache_entry *ce, ce_len -= pathlen; ce_name = ce->name + pathlen; - len = tree_entry_len(n); - return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); + return df_name_compare(ce_name, ce_len, S_IFREG, name, namelen, mode); } static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n) { - int cmp = do_compare_entry(ce, info, n); + int cmp = do_compare_entry(ce, info, n->path, n->pathlen, n->mode); if (cmp) return cmp; @@ -769,7 +937,7 @@ static int compare_entry(const struct cache_entry *ce, const struct traverse_inf * Even if the beginning compared identically, the ce should * compare as bigger than a directory leading up to it! 
*/ - return ce_namelen(ce) > traverse_path_len(info, n); + return ce_namelen(ce) > traverse_path_len(info, tree_entry_len(n)); } static int ce_in_traverse_path(const struct cache_entry *ce, @@ -777,7 +945,8 @@ static int ce_in_traverse_path(const struct cache_entry *ce, { if (!info->prev) return 1; - if (do_compare_entry(ce, info->prev, &info->name)) + if (do_compare_entry(ce, info->prev, + info->name, info->namelen, info->mode)) return 0; /* * If ce (blob) is the same name as the path (which is a tree @@ -786,20 +955,33 @@ static int ce_in_traverse_path(const struct cache_entry *ce, return (info->pathlen < ce_namelen(ce)); } -static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage) +static struct cache_entry *create_ce_entry(const struct traverse_info *info, + const struct name_entry *n, + int stage, + struct index_state *istate, + int is_transient) { - int len = traverse_path_len(info, n); - struct cache_entry *ce = xcalloc(1, cache_entry_size(len)); + size_t len = traverse_path_len(info, tree_entry_len(n)); + struct cache_entry *ce = + is_transient ? + make_empty_transient_cache_entry(len) : + make_empty_cache_entry(istate, len); ce->ce_mode = create_ce_mode(n->mode); ce->ce_flags = create_ce_flags(stage); ce->ce_namelen = len; - oidcpy(&ce->oid, n->oid); - make_traverse_path(ce->name, info, n); + oidcpy(&ce->oid, &n->oid); + /* len+1 because the cache_entry allocates space for NUL */ + make_traverse_path(ce->name, len + 1, info, n->path, n->pathlen); return ce; } +/* + * Note that traverse_by_cache_tree() duplicates some logic in this function + * without actually calling it. If you change the logic here you may need to + * check and change there as well. + */ static int unpack_nondirectories(int n, unsigned long mask, unsigned long dirmask, struct cache_entry **src, @@ -835,7 +1017,15 @@ static int unpack_nondirectories(int n, unsigned long mask, stage = 3; else stage = 2; - src[i + o->merge] = create_ce_entry(info, names + i, stage); + + /* + * If the merge bit is set, then the cache entries are + * discarded in the following block. In this case, + * construct "transient" cache_entries, as they are + * not stored in the index. otherwise construct the + * cache entry from the index aware logic. + */ + src[i + o->merge] = create_ce_entry(info, names + i, stage, &o->result, o->merge); } if (o->merge) { @@ -844,7 +1034,7 @@ static int unpack_nondirectories(int n, unsigned long mask, for (i = 0; i < n; i++) { struct cache_entry *ce = src[i + o->merge]; if (ce != o->df_conflict_entry) - free(ce); + discard_cache_entry(ce); } return rc; } @@ -860,7 +1050,7 @@ static int unpack_nondirectories(int n, unsigned long mask, static int unpack_failed(struct unpack_trees_options *o, const char *message) { discard_index(&o->result); - if (!o->gently && !o->exiting_early) { + if (!o->quiet && !o->exiting_early) { if (message) return error("%s", message); return -1; @@ -875,13 +1065,12 @@ static int unpack_failed(struct unpack_trees_options *o, const char *message) * the directory. 
*/ static int find_cache_pos(struct traverse_info *info, - const struct name_entry *p) + const char *p, size_t p_len) { int pos; struct unpack_trees_options *o = info->data; struct index_state *index = o->src_index; int pfxlen = info->pathlen; - int p_len = tree_entry_len(p); for (pos = o->cache_bottom; pos < index->cache_nr; pos++) { const struct cache_entry *ce = index->cache[pos]; @@ -917,7 +1106,7 @@ static int find_cache_pos(struct traverse_info *info, ce_len = ce_slash - ce_name; else ce_len = ce_namelen(ce) - pfxlen; - cmp = name_compare(p->path, p_len, ce_name, ce_len); + cmp = name_compare(p, p_len, ce_name, ce_len); /* * Exact match; if we have a directory we need to * delay returning it. @@ -932,7 +1121,7 @@ static int find_cache_pos(struct traverse_info *info, * E.g. ce_name == "t-i", and p->path == "t"; we may * have "t/a" in the index. */ - if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) && + if (p_len < ce_len && !memcmp(ce_name, p, p_len) && ce_name[p_len] < '/') continue; /* keep looking */ break; @@ -943,7 +1132,7 @@ static int find_cache_pos(struct traverse_info *info, static struct cache_entry *find_cache_entry(struct traverse_info *info, const struct name_entry *p) { - int pos = find_cache_pos(info, p); + int pos = find_cache_pos(info, p->path, p->pathlen); struct unpack_trees_options *o = info->data; if (0 <= pos) @@ -956,10 +1145,10 @@ static void debug_path(struct traverse_info *info) { if (info->prev) { debug_path(info->prev); - if (*info->prev->name.path) + if (*info->prev->name) putchar('/'); } - printf("%s", info->name.path); + printf("%s", info->name); } static void debug_name_entry(int i, struct name_entry *n) @@ -984,6 +1173,11 @@ static void debug_unpack_callback(int n, debug_name_entry(i, names + i); } +/* + * Note that traverse_by_cache_tree() duplicates some logic in this function + * without actually calling it. If you change the logic here you may need to + * check and change there as well. 
+ */ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info) { struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; @@ -1074,29 +1268,38 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str return mask; } -static int clear_ce_flags_1(struct cache_entry **cache, int nr, +static int clear_ce_flags_1(struct index_state *istate, + struct cache_entry **cache, int nr, struct strbuf *prefix, int select_mask, int clear_mask, - struct exclude_list *el, int defval); + struct pattern_list *pl, + enum pattern_match_result default_match, + int progress_nr); /* Whole directory matching */ -static int clear_ce_flags_dir(struct cache_entry **cache, int nr, +static int clear_ce_flags_dir(struct index_state *istate, + struct cache_entry **cache, int nr, struct strbuf *prefix, char *basename, int select_mask, int clear_mask, - struct exclude_list *el, int defval) + struct pattern_list *pl, + enum pattern_match_result default_match, + int progress_nr) { struct cache_entry **cache_end; int dtype = DT_DIR; - int ret = is_excluded_from_list(prefix->buf, prefix->len, - basename, &dtype, el, &the_index); int rc; + enum pattern_match_result ret, orig_ret; + orig_ret = path_matches_pattern_list(prefix->buf, prefix->len, + basename, &dtype, pl, istate); strbuf_addch(prefix, '/'); /* If undecided, use matching result of parent dir in defval */ - if (ret < 0) - ret = defval; + if (orig_ret == UNDECIDED) + ret = default_match; + else + ret = orig_ret; for (cache_end = cache; cache_end != cache + nr; cache_end++) { struct cache_entry *ce = *cache_end; @@ -1104,24 +1307,31 @@ static int clear_ce_flags_dir(struct cache_entry **cache, int nr, break; } - /* - * TODO: check el, if there are no patterns that may conflict - * with ret (iow, we know in advance the incl/excl - * decision for the entire directory), clear flag here without - * calling clear_ce_flags_1(). That function will call - * the expensive is_excluded_from_list() on every entry. - */ - rc = clear_ce_flags_1(cache, cache_end - cache, - prefix, - select_mask, clear_mask, - el, ret); + if (pl->use_cone_patterns && orig_ret == MATCHED_RECURSIVE) { + struct cache_entry **ce = cache; + rc = cache_end - cache; + + while (ce < cache_end) { + (*ce)->ce_flags &= ~clear_mask; + ce++; + } + } else if (pl->use_cone_patterns && orig_ret == NOT_MATCHED) { + rc = cache_end - cache; + } else { + rc = clear_ce_flags_1(istate, cache, cache_end - cache, + prefix, + select_mask, clear_mask, + pl, ret, + progress_nr); + } + strbuf_setlen(prefix, prefix->len - 1); return rc; } /* * Traverse the index, find every entry that matches according to - * o->el. Do "ce_flags &= ~clear_mask" on those entries. Return the + * o->pl. Do "ce_flags &= ~clear_mask" on those entries. Return the * number of traversed entries. * * If select_mask is non-zero, only entries whose ce_flags has on of @@ -1134,12 +1344,15 @@ static int clear_ce_flags_dir(struct cache_entry **cache, int nr, * cache[0]->name[0..(prefix_len-1)] * Top level path has prefix_len zero. */ -static int clear_ce_flags_1(struct cache_entry **cache, int nr, +static int clear_ce_flags_1(struct index_state *istate, + struct cache_entry **cache, int nr, struct strbuf *prefix, int select_mask, int clear_mask, - struct exclude_list *el, int defval) + struct pattern_list *pl, + enum pattern_match_result default_match, + int progress_nr) { - struct cache_entry **cache_end = cache + nr; + struct cache_entry **cache_end = nr ? 
cache + nr : cache; /* * Process all entries that have the given prefix and meet @@ -1148,10 +1361,14 @@ static int clear_ce_flags_1(struct cache_entry **cache, int nr, while(cache != cache_end) { struct cache_entry *ce = *cache; const char *name, *slash; - int len, dtype, ret; + int len, dtype; + enum pattern_match_result ret; + + display_progress(istate->progress, progress_nr); if (select_mask && !(ce->ce_flags & select_mask)) { cache++; + progress_nr++; continue; } @@ -1168,60 +1385,88 @@ static int clear_ce_flags_1(struct cache_entry **cache, int nr, len = slash - name; strbuf_add(prefix, name, len); - processed = clear_ce_flags_dir(cache, cache_end - cache, + processed = clear_ce_flags_dir(istate, cache, cache_end - cache, prefix, prefix->buf + prefix->len - len, select_mask, clear_mask, - el, defval); + pl, default_match, + progress_nr); /* clear_c_f_dir eats a whole dir already? */ if (processed) { cache += processed; + progress_nr += processed; strbuf_setlen(prefix, prefix->len - len); continue; } strbuf_addch(prefix, '/'); - cache += clear_ce_flags_1(cache, cache_end - cache, - prefix, - select_mask, clear_mask, el, defval); + processed = clear_ce_flags_1(istate, cache, cache_end - cache, + prefix, + select_mask, clear_mask, pl, + default_match, progress_nr); + + cache += processed; + progress_nr += processed; + strbuf_setlen(prefix, prefix->len - len - 1); continue; } /* Non-directory */ dtype = ce_to_dtype(ce); - ret = is_excluded_from_list(ce->name, ce_namelen(ce), - name, &dtype, el, &the_index); - if (ret < 0) - ret = defval; - if (ret > 0) + ret = path_matches_pattern_list(ce->name, + ce_namelen(ce), + name, &dtype, pl, istate); + if (ret == UNDECIDED) + ret = default_match; + if (ret == MATCHED || ret == MATCHED_RECURSIVE) ce->ce_flags &= ~clear_mask; cache++; + progress_nr++; } + + display_progress(istate->progress, progress_nr); return nr - (cache_end - cache); } -static int clear_ce_flags(struct cache_entry **cache, int nr, - int select_mask, int clear_mask, - struct exclude_list *el) +static int clear_ce_flags(struct index_state *istate, + int select_mask, int clear_mask, + struct pattern_list *pl, + int show_progress) { static struct strbuf prefix = STRBUF_INIT; + char label[100]; + int rval; strbuf_reset(&prefix); - - return clear_ce_flags_1(cache, nr, + if (show_progress) + istate->progress = start_delayed_progress( + _("Updating index flags"), + istate->cache_nr); + + xsnprintf(label, sizeof(label), "clear_ce_flags(0x%08lx,0x%08lx)", + (unsigned long)select_mask, (unsigned long)clear_mask); + trace2_region_enter("unpack_trees", label, the_repository); + rval = clear_ce_flags_1(istate, + istate->cache, + istate->cache_nr, &prefix, select_mask, clear_mask, - el, 0); + pl, 0, 0); + trace2_region_leave("unpack_trees", label, the_repository); + + stop_progress(&istate->progress); + return rval; } /* * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout */ -static void mark_new_skip_worktree(struct exclude_list *el, - struct index_state *the_index, - int select_flag, int skip_wt_flag) +static void mark_new_skip_worktree(struct pattern_list *pl, + struct index_state *istate, + int select_flag, int skip_wt_flag, + int show_progress) { int i; @@ -1229,13 +1474,13 @@ static void mark_new_skip_worktree(struct exclude_list *el, * 1. 
Pretend the narrowest worktree: only unmerged entries * are checked out */ - for (i = 0; i < the_index->cache_nr; i++) { - struct cache_entry *ce = the_index->cache[i]; + for (i = 0; i < istate->cache_nr; i++) { + struct cache_entry *ce = istate->cache[i]; if (select_flag && !(ce->ce_flags & select_flag)) continue; - if (!ce_stage(ce)) + if (!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED)) ce->ce_flags |= skip_wt_flag; else ce->ce_flags &= ~skip_wt_flag; @@ -1245,8 +1490,7 @@ static void mark_new_skip_worktree(struct exclude_list *el, * 2. Widen worktree according to sparse-checkout file. * Matched entries will have skip_wt_flag cleared (i.e. "in") */ - clear_ce_flags(the_index->cache, the_index->cache_nr, - select_flag, skip_wt_flag, el); + clear_ce_flags(istate, select_flag, skip_wt_flag, pl, show_progress); } static int verify_absent(const struct cache_entry *, @@ -1262,20 +1506,22 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options { int i, ret; static struct cache_entry *dfc; - struct exclude_list el; + struct pattern_list pl; if (len > MAX_UNPACK_TREES) die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES); - memset(&el, 0, sizeof(el)); + trace_performance_enter(); + memset(&pl, 0, sizeof(pl)); if (!core_apply_sparse_checkout || !o->update) o->skip_sparse_checkout = 1; - if (!o->skip_sparse_checkout) { + if (!o->skip_sparse_checkout && !o->pl) { char *sparse = git_pathdup("info/sparse-checkout"); - if (add_excludes_from_file_to_list(sparse, "", 0, &el, NULL) < 0) + pl.use_cone_patterns = core_sparse_checkout_cone; + if (add_patterns_from_file_to_list(sparse, "", 0, &pl, NULL) < 0) o->skip_sparse_checkout = 1; else - o->el = ⪙ + o->pl = &pl; free(sparse); } @@ -1284,18 +1530,33 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options o->result.timestamp.sec = o->src_index->timestamp.sec; o->result.timestamp.nsec = o->src_index->timestamp.nsec; o->result.version = o->src_index->version; - o->result.split_index = o->src_index->split_index; - if (o->result.split_index) + if (!o->src_index->split_index) { + o->result.split_index = NULL; + } else if (o->src_index == o->dst_index) { + /* + * o->dst_index (and thus o->src_index) will be discarded + * and overwritten with o->result at the end of this function, + * so just use src_index's split_index to avoid having to + * create a new one. 
+ */ + o->result.split_index = o->src_index->split_index; o->result.split_index->refcount++; - hashcpy(o->result.sha1, o->src_index->sha1); + } else { + o->result.split_index = init_split_index(&o->result); + } + oidcpy(&o->result.oid, &o->src_index->oid); o->merge_size = len; mark_all_ce_unused(o->src_index); + if (o->src_index->fsmonitor_last_update) + o->result.fsmonitor_last_update = o->src_index->fsmonitor_last_update; + /* * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries */ if (!o->skip_sparse_checkout) - mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE); + mark_new_skip_worktree(o->pl, o->src_index, 0, + CE_NEW_SKIP_WORKTREE, o->verbose_update); if (!dfc) dfc = xcalloc(1, cache_entry_size(0)); @@ -1328,7 +1589,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options } } - if (traverse_trees(len, t, &info) < 0) + trace_performance_enter(); + ret = traverse_trees(o->src_index, len, t, &info); + trace_performance_leave("traverse_trees"); + if (ret < 0) goto return_failed; } @@ -1357,7 +1621,9 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options * If the will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE * so apply_sparse_checkout() won't attempt to remove it from worktree */ - mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE); + mark_new_skip_worktree(o->pl, &o->result, + CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE, + o->verbose_update); ret = 0; for (i = 0; i < o->result.cache_nr; i++) { @@ -1401,10 +1667,12 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options } } - o->src_index = NULL; ret = check_updates(o) ? (-2) : 0; if (o->dst_index) { + move_index_extensions(&o->result, o->src_index); if (!ret) { + if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0)) + cache_tree_verify(the_repository, &o->result); if (!o->result.cache_tree) o->result.cache_tree = cache_tree(); if (!cache_tree_fully_valid(o->result.cache_tree)) @@ -1412,15 +1680,19 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options WRITE_TREE_SILENT | WRITE_TREE_REPAIR); } - move_index_extensions(&o->result, o->dst_index); + + o->result.updated_workdir = 1; discard_index(o->dst_index); *o->dst_index = o->result; } else { discard_index(&o->result); } + o->src_index = NULL; done: - clear_exclude_list(&el); + trace_performance_leave("unpack_trees"); + if (!o->keep_pattern_list) + clear_pattern_list(&pl); return ret; return_failed: @@ -1438,8 +1710,7 @@ return_failed: static int reject_merge(const struct cache_entry *ce, struct unpack_trees_options *o) { - return o->gently ? -1 : - add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name); + return add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name); } static int same(const struct cache_entry *a, const struct cache_entry *b) @@ -1451,7 +1722,7 @@ static int same(const struct cache_entry *a, const struct cache_entry *b) if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED) return 0; return a->ce_mode == b->ce_mode && - !oidcmp(&a->oid, &b->oid); + oideq(&a->oid, &b->oid); } @@ -1486,8 +1757,7 @@ static int verify_uptodate_1(const struct cache_entry *ce, int r = check_submodule_move_head(ce, "HEAD", oid_to_hex(&ce->oid), o); if (r) - return o->gently ? 
-1 : - add_rejected_path(o, error_type, ce->name); + return add_rejected_path(o, error_type, ce->name); return 0; } @@ -1505,12 +1775,11 @@ static int verify_uptodate_1(const struct cache_entry *ce, } if (errno == ENOENT) return 0; - return o->gently ? -1 : - add_rejected_path(o, error_type, ce->name); + return add_rejected_path(o, error_type, ce->name); } -static int verify_uptodate(const struct cache_entry *ce, - struct unpack_trees_options *o) +int verify_uptodate(const struct cache_entry *ce, + struct unpack_trees_options *o) { if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE)) return 0; @@ -1523,6 +1792,17 @@ static int verify_uptodate_sparse(const struct cache_entry *ce, return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE); } +/* + * TODO: We should actually invalidate o->result, not src_index [1]. + * But since cache tree and untracked cache both are not copied to + * o->result until unpacking is complete, we invalidate them on + * src_index instead with the assumption that they will be copied to + * dst_index at the end. + * + * [1] src_index->cache_tree is also used in unpack_callback() so if + * we invalidate o->result, we need to update it to use + * o->result.cache_tree as well. + */ static void invalidate_ce_path(const struct cache_entry *ce, struct unpack_trees_options *o) { @@ -1541,7 +1821,6 @@ static void invalidate_ce_path(const struct cache_entry *ce, */ static int verify_clean_submodule(const char *old_sha1, const struct cache_entry *ce, - enum unpack_trees_error_types error_type, struct unpack_trees_options *o) { if (!submodule_from_ce(ce)) @@ -1552,7 +1831,6 @@ static int verify_clean_submodule(const char *old_sha1, } static int verify_clean_subdirectory(const struct cache_entry *ce, - enum unpack_trees_error_types error_type, struct unpack_trees_options *o) { /* @@ -1572,10 +1850,10 @@ static int verify_clean_subdirectory(const struct cache_entry *ce, * If we are not going to update the submodule, then * we don't care. */ - if (!sub_head && !oidcmp(&oid, &ce->oid)) + if (!sub_head && oideq(&oid, &ce->oid)) return 0; return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid), - ce, error_type, o); + ce, o); } /* @@ -1600,6 +1878,7 @@ static int verify_clean_subdirectory(const struct cache_entry *ce, if (verify_uptodate(ce2, o)) return -1; add_entry(o, ce2, CE_REMOVE, 0); + invalidate_ce_path(ce, o); mark_ce_used(ce2, o); } cnt++; @@ -1614,10 +1893,9 @@ static int verify_clean_subdirectory(const struct cache_entry *ce, memset(&d, 0, sizeof(d)); if (o->dir) d.exclude_per_dir = o->dir->exclude_per_dir; - i = read_directory(&d, &the_index, pathbuf, namelen+1, NULL); + i = read_directory(&d, o->src_index, pathbuf, namelen+1, NULL); if (i) - return o->gently ? -1 : - add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name); + return add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name); free(pathbuf); return cnt; } @@ -1656,7 +1934,7 @@ static int check_ok_to_remove(const char *name, int len, int dtype, return 0; if (o->dir && - is_excluded(o->dir, &the_index, name, &dtype)) + is_excluded(o->dir, o->src_index, name, &dtype)) /* * ce->name is explicitly excluded, so it is Ok to * overwrite it. @@ -1670,7 +1948,7 @@ static int check_ok_to_remove(const char *name, int len, int dtype, * files that are in "foo/" we would lose * them. 
*/ - if (verify_clean_subdirectory(ce, error_type, o) < 0) + if (verify_clean_subdirectory(ce, o) < 0) return -1; return 0; } @@ -1686,8 +1964,7 @@ static int check_ok_to_remove(const char *name, int len, int dtype, return 0; } - return o->gently ? -1 : - add_rejected_path(o, error_type, name); + return add_rejected_path(o, error_type, name); } /* @@ -1765,7 +2042,7 @@ static int merged_entry(const struct cache_entry *ce, struct unpack_trees_options *o) { int update = CE_UPDATE; - struct cache_entry *merge = dup_entry(ce); + struct cache_entry *merge = dup_cache_entry(ce, &o->result); if (!old) { /* @@ -1785,7 +2062,7 @@ static int merged_entry(const struct cache_entry *ce, if (verify_absent(merge, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) { - free(merge); + discard_cache_entry(merge); return -1; } invalidate_ce_path(merge, o); @@ -1811,7 +2088,7 @@ static int merged_entry(const struct cache_entry *ce, update = 0; } else { if (verify_uptodate(old, o)) { - free(merge); + discard_cache_entry(merge); return -1; } /* Migrate old flags over */ @@ -1860,6 +2137,8 @@ static int keep_entry(const struct cache_entry *ce, struct unpack_trees_options *o) { add_entry(o, ce, 0, 0); + if (ce_stage(ce)) + invalidate_ce_path(ce, o); return 1; } @@ -2126,7 +2405,7 @@ int bind_merge(const struct cache_entry * const *src, return error("Cannot do a bind merge of %d trees", o->merge_size); if (a && old) - return o->gently ? -1 : + return o->quiet ? -1 : error(ERRORMSG(o, ERROR_BIND_OVERLAP), super_prefixed(a->name), super_prefixed(old->name)); @@ -2157,7 +2436,8 @@ int oneway_merge(const struct cache_entry * const *src, if (old && same(old, a)) { int update = 0; - if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) { + if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old) && + !(old->ce_flags & CE_FSMONITOR_VALID)) { struct stat st; if (lstat(old->name, &st) || ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE)) @@ -2166,7 +2446,7 @@ int oneway_merge(const struct cache_entry * const *src, if (o->update && S_ISGITLINK(old->ce_mode) && should_update_submodules() && !verify_uptodate(old, o)) update |= CE_UPDATE; - add_entry(o, old, update, 0); + add_entry(o, old, update, CE_STAGEMASK); return 0; } return merged_entry(a, old, o); |
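The porcelain-message changes in this diff replace the bare xstrfmt() allocations with strings owned by opts->msgs_to_free, and the new clear_unpack_trees_porcelain() releases them and zeroes opts->msgs. A minimal, hypothetical caller sketch follows; the option fields used for the setup (head_idx, update, merge, fn, src_index, dst_index, and oneway_merge as the merge function) come from unpack-trees.h, which is not part of this diff, so treat the exact sequence as an illustration rather than a prescribed API:

	/*
	 * Hypothetical one-way checkout of a single tree. The point is the
	 * pairing: setup_unpack_trees_porcelain() formats the error messages
	 * into opts.msgs_to_free, and clear_unpack_trees_porcelain() frees
	 * them after unpack_trees() returns.
	 */
	static int checkout_one_tree(struct tree_desc *t)
	{
		struct unpack_trees_options opts;
		int ret;

		memset(&opts, 0, sizeof(opts));
		opts.head_idx = -1;
		opts.update = 1;
		opts.merge = 1;
		opts.fn = oneway_merge;
		opts.src_index = &the_index;
		opts.dst_index = &the_index;
		setup_unpack_trees_porcelain(&opts, "checkout");

		ret = unpack_trees(1, t, &opts);	/* negative on failure */

		clear_unpack_trees_porcelain(&opts);	/* frees msgs_to_free, clears msgs[] */
		return ret;
	}

Keeping the formatted strings in an argv_array means the fixed msgs[] table can mix static translated strings with the per-command formatted ones, and only the latter need to be freed when the options are torn down.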