Diffstat (limited to 'refs.c')
-rw-r--r-- | refs.c | 1082
1 file changed, 868 insertions, 214 deletions
@@ -72,6 +72,10 @@ int check_refname_format(const char *refname, int flags) { int component_len, component_count = 0; + if (!strcmp(refname, "@")) + /* Refname is a single character '@'. */ + return -1; + while (1) { /* We are at the start of a path component. */ component_len = check_refname_component(refname, flags); @@ -109,7 +113,20 @@ struct ref_entry; * (ref_entry->flag & REF_DIR) is zero. */ struct ref_value { + /* + * The name of the object to which this reference resolves + * (which may be a tag object). If REF_ISBROKEN, this is + * null. If REF_ISSYMREF, then this is the name of the object + * referred to by the last reference in the symlink chain. + */ unsigned char sha1[20]; + + /* + * If REF_KNOWS_PEELED, then this field holds the peeled value + * of this reference, or null if the reference is known not to + * be peelable. See the documentation for peel_ref() for an + * exact definition of "peelable". + */ unsigned char peeled[20]; }; @@ -158,7 +175,17 @@ struct ref_dir { struct ref_entry **entries; }; -/* ISSYMREF=0x01, ISPACKED=0x02, and ISBROKEN=0x04 are public interfaces */ +/* + * Bit values for ref_entry::flag. REF_ISSYMREF=0x01, + * REF_ISPACKED=0x02, and REF_ISBROKEN=0x04 are public values; see + * refs.h. + */ + +/* + * The field ref_entry->u.value.peeled of this value entry contains + * the correct peeled value for the reference, which might be + * null_sha1 if the reference is not a tag or if it is broken. + */ #define REF_KNOWS_PEELED 0x08 /* ref_entry represents a directory of references */ @@ -343,18 +370,17 @@ static int ref_entry_cmp_sslice(const void *key_, const void *ent_) } /* - * Return the entry with the given refname from the ref_dir - * (non-recursively), sorting dir if necessary. Return NULL if no - * such entry is found. dir must already be complete. + * Return the index of the entry with the given refname from the + * ref_dir (non-recursively), sorting dir if necessary. Return -1 if + * no such entry is found. dir must already be complete. */ -static struct ref_entry *search_ref_dir(struct ref_dir *dir, - const char *refname, size_t len) +static int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len) { struct ref_entry **r; struct string_slice key; if (refname == NULL || !dir->nr) - return NULL; + return -1; sort_ref_dir(dir); key.len = len; @@ -363,9 +389,9 @@ static struct ref_entry *search_ref_dir(struct ref_dir *dir, ref_entry_cmp_sslice); if (r == NULL) - return NULL; + return -1; - return *r; + return r - dir->entries; } /* @@ -379,8 +405,9 @@ static struct ref_dir *search_for_subdir(struct ref_dir *dir, const char *subdirname, size_t len, int mkdir) { - struct ref_entry *entry = search_ref_dir(dir, subdirname, len); - if (!entry) { + int entry_index = search_ref_dir(dir, subdirname, len); + struct ref_entry *entry; + if (entry_index == -1) { if (!mkdir) return NULL; /* @@ -391,6 +418,8 @@ static struct ref_dir *search_for_subdir(struct ref_dir *dir, */ entry = create_dir_entry(dir->ref_cache, subdirname, len, 0); add_entry_to_dir(dir, entry); + } else { + entry = dir->entries[entry_index]; } return get_ref_dir(entry); } @@ -429,12 +458,67 @@ static struct ref_dir *find_containing_dir(struct ref_dir *dir, */ static struct ref_entry *find_ref(struct ref_dir *dir, const char *refname) { + int entry_index; struct ref_entry *entry; dir = find_containing_dir(dir, refname, 0); if (!dir) return NULL; - entry = search_ref_dir(dir, refname, strlen(refname)); - return (entry && !(entry->flag & REF_DIR)) ? 
entry : NULL; + entry_index = search_ref_dir(dir, refname, strlen(refname)); + if (entry_index == -1) + return NULL; + entry = dir->entries[entry_index]; + return (entry->flag & REF_DIR) ? NULL : entry; +} + +/* + * Remove the entry with the given name from dir, recursing into + * subdirectories as necessary. If refname is the name of a directory + * (i.e., ends with '/'), then remove the directory and its contents. + * If the removal was successful, return the number of entries + * remaining in the directory entry that contained the deleted entry. + * If the name was not found, return -1. Please note that this + * function only deletes the entry from the cache; it does not delete + * it from the filesystem or ensure that other cache entries (which + * might be symbolic references to the removed entry) are updated. + * Nor does it remove any containing dir entries that might be made + * empty by the removal. dir must represent the top-level directory + * and must already be complete. + */ +static int remove_entry(struct ref_dir *dir, const char *refname) +{ + int refname_len = strlen(refname); + int entry_index; + struct ref_entry *entry; + int is_dir = refname[refname_len - 1] == '/'; + if (is_dir) { + /* + * refname represents a reference directory. Remove + * the trailing slash; otherwise we will get the + * directory *representing* refname rather than the + * one *containing* it. + */ + char *dirname = xmemdupz(refname, refname_len - 1); + dir = find_containing_dir(dir, dirname, 0); + free(dirname); + } else { + dir = find_containing_dir(dir, refname, 0); + } + if (!dir) + return -1; + entry_index = search_ref_dir(dir, refname, refname_len); + if (entry_index == -1) + return -1; + entry = dir->entries[entry_index]; + + memmove(&dir->entries[entry_index], + &dir->entries[entry_index + 1], + (dir->nr - entry_index - 1) * sizeof(*dir->entries) + ); + dir->nr--; + if (dir->sorted > entry_index) + dir->sorted--; + free_ref_entry(entry); + return dir->nr; } /* @@ -503,28 +587,69 @@ static void sort_ref_dir(struct ref_dir *dir) dir->sorted = dir->nr = i; } -#define DO_FOR_EACH_INCLUDE_BROKEN 01 +/* Include broken references in a do_for_each_ref*() iteration: */ +#define DO_FOR_EACH_INCLUDE_BROKEN 0x01 +/* + * Return true iff the reference described by entry can be resolved to + * an object in the database. Emit a warning if the referred-to + * object does not exist. + */ +static int ref_resolves_to_object(struct ref_entry *entry) +{ + if (entry->flag & REF_ISBROKEN) + return 0; + if (!has_sha1_file(entry->u.value.sha1)) { + error("%s does not point to a valid object!", entry->name); + return 0; + } + return 1; +} + +/* + * current_ref is a performance hack: when iterating over references + * using the for_each_ref*() functions, current_ref is set to the + * current reference's entry before calling the callback function. If + * the callback function calls peel_ref(), then peel_ref() first + * checks whether the reference to be peeled is the current reference + * (it usually is) and if so, returns that reference's peeled version + * if it is available. This avoids a refname lookup in a common case. 
+ */ static struct ref_entry *current_ref; -static int do_one_ref(const char *base, each_ref_fn fn, int trim, - int flags, void *cb_data, struct ref_entry *entry) +typedef int each_ref_entry_fn(struct ref_entry *entry, void *cb_data); + +struct ref_entry_cb { + const char *base; + int trim; + int flags; + each_ref_fn *fn; + void *cb_data; +}; + +/* + * Handle one reference in a do_for_each_ref*()-style iteration, + * calling an each_ref_fn for each entry. + */ +static int do_one_ref(struct ref_entry *entry, void *cb_data) { + struct ref_entry_cb *data = cb_data; + struct ref_entry *old_current_ref; int retval; - if (prefixcmp(entry->name, base)) + + if (prefixcmp(entry->name, data->base)) return 0; - if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN)) { - if (entry->flag & REF_ISBROKEN) - return 0; /* ignore broken refs e.g. dangling symref */ - if (!has_sha1_file(entry->u.value.sha1)) { - error("%s does not point to a valid object!", entry->name); - return 0; - } - } + if (!(data->flags & DO_FOR_EACH_INCLUDE_BROKEN) && + !ref_resolves_to_object(entry)) + return 0; + + /* Store the old value, in case this is a recursive call: */ + old_current_ref = current_ref; current_ref = entry; - retval = fn(entry->name + trim, entry->u.value.sha1, entry->flag, cb_data); - current_ref = NULL; + retval = data->fn(entry->name + data->trim, entry->u.value.sha1, + entry->flag, data->cb_data); + current_ref = old_current_ref; return retval; } @@ -532,11 +657,11 @@ static int do_one_ref(const char *base, each_ref_fn fn, int trim, * Call fn for each reference in dir that has index in the range * offset <= index < dir->nr. Recurse into subdirectories that are in * that index range, sorting them before iterating. This function - * does not sort dir itself; it should be sorted beforehand. + * does not sort dir itself; it should be sorted beforehand. fn is + * called for all references, including broken ones. */ -static int do_for_each_ref_in_dir(struct ref_dir *dir, int offset, - const char *base, - each_ref_fn fn, int trim, int flags, void *cb_data) +static int do_for_each_entry_in_dir(struct ref_dir *dir, int offset, + each_ref_entry_fn fn, void *cb_data) { int i; assert(dir->sorted == dir->nr); @@ -546,10 +671,9 @@ static int do_for_each_ref_in_dir(struct ref_dir *dir, int offset, if (entry->flag & REF_DIR) { struct ref_dir *subdir = get_ref_dir(entry); sort_ref_dir(subdir); - retval = do_for_each_ref_in_dir(subdir, 0, - base, fn, trim, flags, cb_data); + retval = do_for_each_entry_in_dir(subdir, 0, fn, cb_data); } else { - retval = do_one_ref(base, fn, trim, flags, cb_data, entry); + retval = fn(entry, cb_data); } if (retval) return retval; @@ -562,12 +686,12 @@ static int do_for_each_ref_in_dir(struct ref_dir *dir, int offset, * by refname. Recurse into subdirectories. If a value entry appears * in both dir1 and dir2, then only process the version that is in * dir2. The input dirs must already be sorted, but subdirs will be - * sorted as needed. + * sorted as needed. fn is called for all references, including + * broken ones. 
*/ -static int do_for_each_ref_in_dirs(struct ref_dir *dir1, - struct ref_dir *dir2, - const char *base, each_ref_fn fn, int trim, - int flags, void *cb_data) +static int do_for_each_entry_in_dirs(struct ref_dir *dir1, + struct ref_dir *dir2, + each_ref_entry_fn fn, void *cb_data) { int retval; int i1 = 0, i2 = 0; @@ -578,12 +702,10 @@ static int do_for_each_ref_in_dirs(struct ref_dir *dir1, struct ref_entry *e1, *e2; int cmp; if (i1 == dir1->nr) { - return do_for_each_ref_in_dir(dir2, i2, - base, fn, trim, flags, cb_data); + return do_for_each_entry_in_dir(dir2, i2, fn, cb_data); } if (i2 == dir2->nr) { - return do_for_each_ref_in_dir(dir1, i1, - base, fn, trim, flags, cb_data); + return do_for_each_entry_in_dir(dir1, i1, fn, cb_data); } e1 = dir1->entries[i1]; e2 = dir2->entries[i2]; @@ -595,14 +717,13 @@ static int do_for_each_ref_in_dirs(struct ref_dir *dir1, struct ref_dir *subdir2 = get_ref_dir(e2); sort_ref_dir(subdir1); sort_ref_dir(subdir2); - retval = do_for_each_ref_in_dirs( - subdir1, subdir2, - base, fn, trim, flags, cb_data); + retval = do_for_each_entry_in_dirs( + subdir1, subdir2, fn, cb_data); i1++; i2++; } else if (!(e1->flag & REF_DIR) && !(e2->flag & REF_DIR)) { /* Both are references; ignore the one from dir1. */ - retval = do_one_ref(base, fn, trim, flags, cb_data, e2); + retval = fn(e2, cb_data); i1++; i2++; } else { @@ -621,26 +742,33 @@ static int do_for_each_ref_in_dirs(struct ref_dir *dir1, if (e->flag & REF_DIR) { struct ref_dir *subdir = get_ref_dir(e); sort_ref_dir(subdir); - retval = do_for_each_ref_in_dir( - subdir, 0, - base, fn, trim, flags, cb_data); + retval = do_for_each_entry_in_dir( + subdir, 0, fn, cb_data); } else { - retval = do_one_ref(base, fn, trim, flags, cb_data, e); + retval = fn(e, cb_data); } } if (retval) return retval; } - if (i1 < dir1->nr) - return do_for_each_ref_in_dir(dir1, i1, - base, fn, trim, flags, cb_data); - if (i2 < dir2->nr) - return do_for_each_ref_in_dir(dir2, i2, - base, fn, trim, flags, cb_data); - return 0; } /* + * Load all of the refs from the dir into our in-memory cache. The hard work + * of loading loose refs is done by get_ref_dir(), so we just need to recurse + * through all of the sub-directories. We do not even need to care about + * sorting, as traversal order does not matter to us. + */ +static void prime_ref_dir(struct ref_dir *dir) +{ + int i; + for (i = 0; i < dir->nr; i++) { + struct ref_entry *entry = dir->entries[i]; + if (entry->flag & REF_DIR) + prime_ref_dir(get_ref_dir(entry)); + } +} +/* * Return true iff refname1 and refname2 conflict with each other. 
* Two reference names conflict if one of them exactly matches the * leading components of the other; e.g., "foo/bar" conflicts with @@ -661,14 +789,13 @@ struct name_conflict_cb { const char *conflicting_refname; }; -static int name_conflict_fn(const char *existingrefname, const unsigned char *sha1, - int flags, void *cb_data) +static int name_conflict_fn(struct ref_entry *entry, void *cb_data) { struct name_conflict_cb *data = (struct name_conflict_cb *)cb_data; - if (data->oldrefname && !strcmp(data->oldrefname, existingrefname)) + if (data->oldrefname && !strcmp(data->oldrefname, entry->name)) return 0; - if (names_conflict(data->refname, existingrefname)) { - data->conflicting_refname = existingrefname; + if (names_conflict(data->refname, entry->name)) { + data->conflicting_refname = entry->name; return 1; } return 0; @@ -676,7 +803,7 @@ static int name_conflict_fn(const char *existingrefname, const unsigned char *sh /* * Return true iff a reference named refname could be created without - * conflicting with the name of an existing reference in array. If + * conflicting with the name of an existing reference in dir. If * oldrefname is non-NULL, ignore potential conflicts with oldrefname * (e.g., because oldrefname is scheduled for deletion in the same * operation). @@ -690,9 +817,7 @@ static int is_refname_available(const char *refname, const char *oldrefname, data.conflicting_refname = NULL; sort_ref_dir(dir); - if (do_for_each_ref_in_dir(dir, 0, "", name_conflict_fn, - 0, DO_FOR_EACH_INCLUDE_BROKEN, - &data)) { + if (do_for_each_entry_in_dir(dir, 0, name_conflict_fn, &data)) { error("'%s' exists; cannot create '%s'", data.conflicting_refname, refname); return 0; @@ -700,6 +825,30 @@ static int is_refname_available(const char *refname, const char *oldrefname, return 1; } +struct packed_ref_cache { + struct ref_entry *root; + + /* + * Count of references to the data structure in this instance, + * including the pointer from ref_cache::packed if any. The + * data will not be freed as long as the reference count is + * nonzero. + */ + unsigned int referrers; + + /* + * Iff the packed-refs file associated with this instance is + * currently locked for writing, this points at the associated + * lock (which is owned by somebody else). The referrer count + * is also incremented when the file is locked and decremented + * when it is unlocked. + */ + struct lock_file *lock; + + /* The metadata from when this packed-refs cache was read */ + struct stat_validity validity; +}; + /* * Future: need to be in "struct repository" * when doing a full libification. @@ -707,16 +856,51 @@ static int is_refname_available(const char *refname, const char *oldrefname, static struct ref_cache { struct ref_cache *next; struct ref_entry *loose; - struct ref_entry *packed; - /* The submodule name, or "" for the main repo. */ - char name[FLEX_ARRAY]; -} *ref_cache; + struct packed_ref_cache *packed; + /* + * The submodule name, or "" for the main repo. We allocate + * length 1 rather than FLEX_ARRAY so that the main ref_cache + * is initialized correctly. + */ + char name[1]; +} ref_cache, *submodule_ref_caches; + +/* Lock used for the main packed-refs file: */ +static struct lock_file packlock; + +/* + * Increment the reference count of *packed_refs. + */ +static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs) +{ + packed_refs->referrers++; +} + +/* + * Decrease the reference count of *packed_refs. If it goes to zero, + * free *packed_refs and return true; otherwise return false. 
+ */ +static int release_packed_ref_cache(struct packed_ref_cache *packed_refs) +{ + if (!--packed_refs->referrers) { + free_ref_entry(packed_refs->root); + stat_validity_clear(&packed_refs->validity); + free(packed_refs); + return 1; + } else { + return 0; + } +} static void clear_packed_ref_cache(struct ref_cache *refs) { if (refs->packed) { - free_ref_entry(refs->packed); + struct packed_ref_cache *packed_refs = refs->packed; + + if (packed_refs->lock) + die("internal error: packed-ref cache cleared while locked"); refs->packed = NULL; + release_packed_ref_cache(packed_refs); } } @@ -748,18 +932,18 @@ static struct ref_cache *create_ref_cache(const char *submodule) */ static struct ref_cache *get_ref_cache(const char *submodule) { - struct ref_cache *refs = ref_cache; - if (!submodule) - submodule = ""; - while (refs) { + struct ref_cache *refs; + + if (!submodule || !*submodule) + return &ref_cache; + + for (refs = submodule_ref_caches; refs; refs = refs->next) if (!strcmp(submodule, refs->name)) return refs; - refs = refs->next; - } refs = create_ref_cache(submodule); - refs->next = ref_cache; - ref_cache = refs; + refs->next = submodule_ref_caches; + submodule_ref_caches = refs; return refs; } @@ -770,6 +954,16 @@ void invalidate_ref_cache(const char *submodule) clear_loose_ref_cache(refs); } +/* The length of a peeled reference line in packed-refs, including EOL: */ +#define PEELED_LINE_LENGTH 42 + +/* + * The packed-refs header line that we write out. Perhaps other + * traits will be added later. The trailing space is required. + */ +static const char PACKED_REFS_HEADER[] = + "# pack-refs with: peeled fully-peeled \n"; + /* * Parse one line from a packed-refs file. Write the SHA1 to sha1. * Return a pointer to the refname within the line (null-terminated), @@ -862,8 +1056,8 @@ static void read_packed_refs(FILE *f, struct ref_dir *dir) } if (last && refline[0] == '^' && - strlen(refline) == 42 && - refline[41] == '\n' && + strlen(refline) == PEELED_LINE_LENGTH && + refline[PEELED_LINE_LENGTH - 1] == '\n' && !get_sha1_hex(refline + 1, sha1)) { hashcpy(last->u.value.peeled, sha1); /* @@ -876,30 +1070,58 @@ static void read_packed_refs(FILE *f, struct ref_dir *dir) } } -static struct ref_dir *get_packed_refs(struct ref_cache *refs) +/* + * Get the packed_ref_cache for the specified ref_cache, creating it + * if necessary. 
+ */ +static struct packed_ref_cache *get_packed_ref_cache(struct ref_cache *refs) { + const char *packed_refs_file; + + if (*refs->name) + packed_refs_file = git_path_submodule(refs->name, "packed-refs"); + else + packed_refs_file = git_path("packed-refs"); + + if (refs->packed && + !stat_validity_check(&refs->packed->validity, packed_refs_file)) + clear_packed_ref_cache(refs); + if (!refs->packed) { - const char *packed_refs_file; FILE *f; - refs->packed = create_dir_entry(refs, "", 0, 0); - if (*refs->name) - packed_refs_file = git_path_submodule(refs->name, "packed-refs"); - else - packed_refs_file = git_path("packed-refs"); + refs->packed = xcalloc(1, sizeof(*refs->packed)); + acquire_packed_ref_cache(refs->packed); + refs->packed->root = create_dir_entry(refs, "", 0, 0); f = fopen(packed_refs_file, "r"); if (f) { - read_packed_refs(f, get_ref_dir(refs->packed)); + stat_validity_update(&refs->packed->validity, fileno(f)); + read_packed_refs(f, get_ref_dir(refs->packed->root)); fclose(f); } } - return get_ref_dir(refs->packed); + return refs->packed; +} + +static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache) +{ + return get_ref_dir(packed_ref_cache->root); +} + +static struct ref_dir *get_packed_refs(struct ref_cache *refs) +{ + return get_packed_ref_dir(get_packed_ref_cache(refs)); } void add_packed_ref(const char *refname, const unsigned char *sha1) { - add_ref(get_packed_refs(get_ref_cache(NULL)), - create_ref_entry(refname, sha1, REF_ISPACKED, 1)); + struct packed_ref_cache *packed_ref_cache = + get_packed_ref_cache(&ref_cache); + + if (!packed_ref_cache->lock) + die("internal error: packed refs not locked"); + add_ref(get_packed_ref_dir(packed_ref_cache), + create_ref_entry(refname, sha1, REF_ISPACKED, 1)); } /* @@ -1069,18 +1291,43 @@ int resolve_gitlink_ref(const char *path, const char *refname, unsigned char *sh } /* - * Try to read ref from the packed references. On success, set sha1 - * and return 0; otherwise, return -1. + * Return the ref_entry for the given refname from the packed + * references. If it does not exist, return NULL. */ -static int get_packed_ref(const char *refname, unsigned char *sha1) +static struct ref_entry *get_packed_ref(const char *refname) { - struct ref_dir *packed = get_packed_refs(get_ref_cache(NULL)); - struct ref_entry *entry = find_ref(packed, refname); + return find_ref(get_packed_refs(&ref_cache), refname); +} + +/* + * A loose ref file doesn't exist; check for a packed ref. The + * options are forwarded from resolve_safe_unsafe(). + */ +static const char *handle_missing_loose_ref(const char *refname, + unsigned char *sha1, + int reading, + int *flag) +{ + struct ref_entry *entry; + + /* + * The loose reference file does not exist; check for a packed + * reference. + */ + entry = get_packed_ref(refname); if (entry) { hashcpy(sha1, entry->u.value.sha1); - return 0; + if (flag) + *flag |= REF_ISPACKED; + return refname; + } + /* The reference is not a packed reference, either. */ + if (reading) { + return NULL; + } else { + hashclr(sha1); + return refname; } - return -1; } const char *resolve_ref_unsafe(const char *refname, unsigned char *sha1, int reading, int *flag) @@ -1107,32 +1354,34 @@ const char *resolve_ref_unsafe(const char *refname, unsigned char *sha1, int rea git_snpath(path, sizeof(path), "%s", refname); + /* + * We might have to loop back here to avoid a race + * condition: first we lstat() the file, then we try + * to read it as a link or as a file. 
But if somebody + * changes the type of the file (file <-> directory + * <-> symlink) between the lstat() and reading, then + * we don't want to report that as an error but rather + * try again starting with the lstat(). + */ + stat_ref: if (lstat(path, &st) < 0) { - if (errno != ENOENT) - return NULL; - /* - * The loose reference file does not exist; - * check for a packed reference. - */ - if (!get_packed_ref(refname, sha1)) { - if (flag) - *flag |= REF_ISPACKED; - return refname; - } - /* The reference is not a packed reference, either. */ - if (reading) { + if (errno == ENOENT) + return handle_missing_loose_ref(refname, sha1, + reading, flag); + else return NULL; - } else { - hashclr(sha1); - return refname; - } } /* Follow "normalized" - ie "refs/.." symlinks by hand */ if (S_ISLNK(st.st_mode)) { len = readlink(path, buffer, sizeof(buffer)-1); - if (len < 0) - return NULL; + if (len < 0) { + if (errno == ENOENT || errno == EINVAL) + /* inconsistent with lstat; retry */ + goto stat_ref; + else + return NULL; + } buffer[len] = 0; if (!prefixcmp(buffer, "refs/") && !check_refname_format(buffer, 0)) { @@ -1155,8 +1404,13 @@ const char *resolve_ref_unsafe(const char *refname, unsigned char *sha1, int rea * a ref */ fd = open(path, O_RDONLY); - if (fd < 0) - return NULL; + if (fd < 0) { + if (errno == ENOENT) + /* inconsistent with lstat; retry */ + goto stat_ref; + else + return NULL; + } len = read_in_full(fd, buffer, sizeof(buffer)-1); close(fd); if (len < 0) @@ -1168,8 +1422,19 @@ const char *resolve_ref_unsafe(const char *refname, unsigned char *sha1, int rea /* * Is it a symbolic ref? */ - if (prefixcmp(buffer, "ref:")) - break; + if (prefixcmp(buffer, "ref:")) { + /* + * Please note that FETCH_HEAD has a second + * line containing other data. + */ + if (get_sha1_hex(buffer, sha1) || + (buffer[40] != '\0' && !isspace(buffer[40]))) { + if (flag) + *flag |= REF_ISBROKEN; + return NULL; + } + return refname; + } if (flag) *flag |= REF_ISSYMREF; buf = buffer + 4; @@ -1182,13 +1447,6 @@ const char *resolve_ref_unsafe(const char *refname, unsigned char *sha1, int rea } refname = strcpy(refname_buffer, buf); } - /* Please note that FETCH_HEAD has a second line containing other data. */ - if (get_sha1_hex(buffer, sha1) || (buffer[40] != '\0' && !isspace(buffer[40]))) { - if (flag) - *flag |= REF_ISBROKEN; - return NULL; - } - return refname; } char *resolve_refdup(const char *ref, unsigned char *sha1, int reading, int *flag) @@ -1231,54 +1489,130 @@ static int filter_refs(const char *refname, const unsigned char *sha1, int flags return filter->fn(refname, sha1, flags, filter->cb_data); } +enum peel_status { + /* object was peeled successfully: */ + PEEL_PEELED = 0, + + /* + * object cannot be peeled because the named object (or an + * object referred to by a tag in the peel chain), does not + * exist. + */ + PEEL_INVALID = -1, + + /* object cannot be peeled because it is not a tag: */ + PEEL_NON_TAG = -2, + + /* ref_entry contains no peeled value because it is a symref: */ + PEEL_IS_SYMREF = -3, + + /* + * ref_entry cannot be peeled because it is broken (i.e., the + * symbolic reference cannot even be resolved to an object + * name): + */ + PEEL_BROKEN = -4 +}; + +/* + * Peel the named object; i.e., if the object is a tag, resolve the + * tag recursively until a non-tag is found. If successful, store the + * result to sha1 and return PEEL_PEELED. If the object is not a tag + * or is not valid, return PEEL_NON_TAG or PEEL_INVALID, respectively, + * and leave sha1 unchanged. 
+ */ +static enum peel_status peel_object(const unsigned char *name, unsigned char *sha1) +{ + struct object *o = lookup_unknown_object(name); + + if (o->type == OBJ_NONE) { + int type = sha1_object_info(name, NULL); + if (type < 0) + return PEEL_INVALID; + o->type = type; + } + + if (o->type != OBJ_TAG) + return PEEL_NON_TAG; + + o = deref_tag_noverify(o); + if (!o) + return PEEL_INVALID; + + hashcpy(sha1, o->sha1); + return PEEL_PEELED; +} + +/* + * Peel the entry (if possible) and return its new peel_status. If + * repeel is true, re-peel the entry even if there is an old peeled + * value that is already stored in it. + * + * It is OK to call this function with a packed reference entry that + * might be stale and might even refer to an object that has since + * been garbage-collected. In such a case, if the entry has + * REF_KNOWS_PEELED then leave the status unchanged and return + * PEEL_PEELED or PEEL_NON_TAG; otherwise, return PEEL_INVALID. + */ +static enum peel_status peel_entry(struct ref_entry *entry, int repeel) +{ + enum peel_status status; + + if (entry->flag & REF_KNOWS_PEELED) { + if (repeel) { + entry->flag &= ~REF_KNOWS_PEELED; + hashclr(entry->u.value.peeled); + } else { + return is_null_sha1(entry->u.value.peeled) ? + PEEL_NON_TAG : PEEL_PEELED; + } + } + if (entry->flag & REF_ISBROKEN) + return PEEL_BROKEN; + if (entry->flag & REF_ISSYMREF) + return PEEL_IS_SYMREF; + + status = peel_object(entry->u.value.sha1, entry->u.value.peeled); + if (status == PEEL_PEELED || status == PEEL_NON_TAG) + entry->flag |= REF_KNOWS_PEELED; + return status; +} + int peel_ref(const char *refname, unsigned char *sha1) { int flag; unsigned char base[20]; - struct object *o; if (current_ref && (current_ref->name == refname - || !strcmp(current_ref->name, refname))) { - if (current_ref->flag & REF_KNOWS_PEELED) { - if (is_null_sha1(current_ref->u.value.peeled)) - return -1; - hashcpy(sha1, current_ref->u.value.peeled); - return 0; - } - hashcpy(base, current_ref->u.value.sha1); - goto fallback; + || !strcmp(current_ref->name, refname))) { + if (peel_entry(current_ref, 0)) + return -1; + hashcpy(sha1, current_ref->u.value.peeled); + return 0; } if (read_ref_full(refname, base, 1, &flag)) return -1; - if ((flag & REF_ISPACKED)) { - struct ref_dir *dir = get_packed_refs(get_ref_cache(NULL)); - struct ref_entry *r = find_ref(dir, refname); - - if (r != NULL && r->flag & REF_KNOWS_PEELED) { + /* + * If the reference is packed, read its ref_entry from the + * cache in the hope that we already know its peeled value. + * We only try this optimization on packed references because + * (a) forcing the filling of the loose reference cache could + * be expensive and (b) loose references anyway usually do not + * have REF_KNOWS_PEELED. 
+ */ + if (flag & REF_ISPACKED) { + struct ref_entry *r = get_packed_ref(refname); + if (r) { + if (peel_entry(r, 0)) + return -1; hashcpy(sha1, r->u.value.peeled); return 0; } } -fallback: - o = lookup_unknown_object(base); - if (o->type == OBJ_NONE) { - int type = sha1_object_info(base, NULL); - if (type < 0) - return -1; - o->type = type; - } - - if (o->type == OBJ_TAG) { - o = deref_tag_noverify(o); - if (o) { - hashcpy(sha1, o->sha1); - return 0; - } - } - return -1; + return peel_object(base, sha1); } struct warn_if_dangling_data { @@ -1316,40 +1650,84 @@ void warn_dangling_symref(FILE *fp, const char *msg_fmt, const char *refname) for_each_rawref(warn_if_dangling_symref, &data); } -static int do_for_each_ref(const char *submodule, const char *base, each_ref_fn fn, - int trim, int flags, void *cb_data) +/* + * Call fn for each reference in the specified ref_cache, omitting + * references not in the containing_dir of base. fn is called for all + * references, including broken ones. If fn ever returns a non-zero + * value, stop the iteration and return that value; otherwise, return + * 0. + */ +static int do_for_each_entry(struct ref_cache *refs, const char *base, + each_ref_entry_fn fn, void *cb_data) { - struct ref_cache *refs = get_ref_cache(submodule); - struct ref_dir *packed_dir = get_packed_refs(refs); - struct ref_dir *loose_dir = get_loose_refs(refs); + struct packed_ref_cache *packed_ref_cache; + struct ref_dir *loose_dir; + struct ref_dir *packed_dir; int retval = 0; + /* + * We must make sure that all loose refs are read before accessing the + * packed-refs file; this avoids a race condition in which loose refs + * are migrated to the packed-refs file by a simultaneous process, but + * our in-memory view is from before the migration. get_packed_ref_cache() + * takes care of making sure our view is up to date with what is on + * disk. + */ + loose_dir = get_loose_refs(refs); if (base && *base) { - packed_dir = find_containing_dir(packed_dir, base, 0); loose_dir = find_containing_dir(loose_dir, base, 0); } + if (loose_dir) + prime_ref_dir(loose_dir); + + packed_ref_cache = get_packed_ref_cache(refs); + acquire_packed_ref_cache(packed_ref_cache); + packed_dir = get_packed_ref_dir(packed_ref_cache); + if (base && *base) { + packed_dir = find_containing_dir(packed_dir, base, 0); + } if (packed_dir && loose_dir) { sort_ref_dir(packed_dir); sort_ref_dir(loose_dir); - retval = do_for_each_ref_in_dirs( - packed_dir, loose_dir, - base, fn, trim, flags, cb_data); + retval = do_for_each_entry_in_dirs( + packed_dir, loose_dir, fn, cb_data); } else if (packed_dir) { sort_ref_dir(packed_dir); - retval = do_for_each_ref_in_dir( - packed_dir, 0, - base, fn, trim, flags, cb_data); + retval = do_for_each_entry_in_dir( + packed_dir, 0, fn, cb_data); } else if (loose_dir) { sort_ref_dir(loose_dir); - retval = do_for_each_ref_in_dir( - loose_dir, 0, - base, fn, trim, flags, cb_data); + retval = do_for_each_entry_in_dir( + loose_dir, 0, fn, cb_data); } + release_packed_ref_cache(packed_ref_cache); return retval; } +/* + * Call fn for each reference in the specified ref_cache for which the + * refname begins with base. If trim is non-zero, then trim that many + * characters off the beginning of each refname before passing the + * refname to fn. flags can be DO_FOR_EACH_INCLUDE_BROKEN to include + * broken references in the iteration. If fn ever returns a non-zero + * value, stop the iteration and return that value; otherwise, return + * 0. 
+ */ +static int do_for_each_ref(struct ref_cache *refs, const char *base, + each_ref_fn fn, int trim, int flags, void *cb_data) +{ + struct ref_entry_cb data; + data.base = base; + data.trim = trim; + data.flags = flags; + data.fn = fn; + data.cb_data = cb_data; + + return do_for_each_entry(refs, base, do_one_ref, &data); +} + static int do_head_ref(const char *submodule, each_ref_fn fn, void *cb_data) { unsigned char sha1[20]; @@ -1380,23 +1758,23 @@ int head_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data) int for_each_ref(each_ref_fn fn, void *cb_data) { - return do_for_each_ref(NULL, "", fn, 0, 0, cb_data); + return do_for_each_ref(&ref_cache, "", fn, 0, 0, cb_data); } int for_each_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data) { - return do_for_each_ref(submodule, "", fn, 0, 0, cb_data); + return do_for_each_ref(get_ref_cache(submodule), "", fn, 0, 0, cb_data); } int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data) { - return do_for_each_ref(NULL, prefix, fn, strlen(prefix), 0, cb_data); + return do_for_each_ref(&ref_cache, prefix, fn, strlen(prefix), 0, cb_data); } int for_each_ref_in_submodule(const char *submodule, const char *prefix, each_ref_fn fn, void *cb_data) { - return do_for_each_ref(submodule, prefix, fn, strlen(prefix), 0, cb_data); + return do_for_each_ref(get_ref_cache(submodule), prefix, fn, strlen(prefix), 0, cb_data); } int for_each_tag_ref(each_ref_fn fn, void *cb_data) @@ -1431,7 +1809,7 @@ int for_each_remote_ref_submodule(const char *submodule, each_ref_fn fn, void *c int for_each_replace_ref(each_ref_fn fn, void *cb_data) { - return do_for_each_ref(NULL, "refs/replace/", fn, 13, 0, cb_data); + return do_for_each_ref(&ref_cache, "refs/replace/", fn, 13, 0, cb_data); } int head_ref_namespaced(each_ref_fn fn, void *cb_data) @@ -1454,7 +1832,7 @@ int for_each_namespaced_ref(each_ref_fn fn, void *cb_data) struct strbuf buf = STRBUF_INIT; int ret; strbuf_addf(&buf, "%srefs/", get_git_namespace()); - ret = do_for_each_ref(NULL, buf.buf, fn, 0, 0, cb_data); + ret = do_for_each_ref(&ref_cache, buf.buf, fn, 0, 0, cb_data); strbuf_release(&buf); return ret; } @@ -1496,7 +1874,7 @@ int for_each_glob_ref(each_ref_fn fn, const char *pattern, void *cb_data) int for_each_rawref(each_ref_fn fn, void *cb_data) { - return do_for_each_ref(NULL, "", fn, 0, + return do_for_each_ref(&ref_cache, "", fn, 0, DO_FOR_EACH_INCLUDE_BROKEN, cb_data); } @@ -1702,7 +2080,7 @@ static struct ref_lock *lock_ref_sha1_basic(const char *refname, * name is a proper prefix of our refname. */ if (missing && - !is_refname_available(refname, NULL, get_packed_refs(get_ref_cache(NULL)))) { + !is_refname_available(refname, NULL, get_packed_refs(&ref_cache))) { last_errno = ENOTDIR; goto error_return; } @@ -1754,48 +2132,325 @@ struct ref_lock *lock_any_ref_for_update(const char *refname, return lock_ref_sha1_basic(refname, old_sha1, flags, NULL); } -struct repack_without_ref_sb { - const char *refname; - int fd; -}; - -static int repack_without_ref_fn(const char *refname, const unsigned char *sha1, - int flags, void *cb_data) +/* + * Write an entry to the packed-refs file for the specified refname. + * If peeled is non-NULL, write it as the entry's peeled value. 
+ */ +static void write_packed_entry(int fd, char *refname, unsigned char *sha1, + unsigned char *peeled) { - struct repack_without_ref_sb *data = cb_data; char line[PATH_MAX + 100]; int len; - if (!strcmp(data->refname, refname)) - return 0; len = snprintf(line, sizeof(line), "%s %s\n", sha1_to_hex(sha1), refname); /* this should not happen but just being defensive */ if (len > sizeof(line)) die("too long a refname '%s'", refname); - write_or_die(data->fd, line, len); + write_or_die(fd, line, len); + + if (peeled) { + if (snprintf(line, sizeof(line), "^%s\n", + sha1_to_hex(peeled)) != PEELED_LINE_LENGTH) + die("internal error"); + write_or_die(fd, line, PEELED_LINE_LENGTH); + } +} + +/* + * An each_ref_entry_fn that writes the entry to a packed-refs file. + */ +static int write_packed_entry_fn(struct ref_entry *entry, void *cb_data) +{ + int *fd = cb_data; + enum peel_status peel_status = peel_entry(entry, 0); + + if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG) + error("internal error: %s is not a valid packed reference!", + entry->name); + write_packed_entry(*fd, entry->name, entry->u.value.sha1, + peel_status == PEEL_PEELED ? + entry->u.value.peeled : NULL); return 0; } -static struct lock_file packlock; +int lock_packed_refs(int flags) +{ + struct packed_ref_cache *packed_ref_cache; -static int repack_without_ref(const char *refname) + if (hold_lock_file_for_update(&packlock, git_path("packed-refs"), flags) < 0) + return -1; + /* + * Get the current packed-refs while holding the lock. If the + * packed-refs file has been modified since we last read it, + * this will automatically invalidate the cache and re-read + * the packed-refs file. + */ + packed_ref_cache = get_packed_ref_cache(&ref_cache); + packed_ref_cache->lock = &packlock; + /* Increment the reference count to prevent it from being freed: */ + acquire_packed_ref_cache(packed_ref_cache); + return 0; +} + +int commit_packed_refs(void) +{ + struct packed_ref_cache *packed_ref_cache = + get_packed_ref_cache(&ref_cache); + int error = 0; + + if (!packed_ref_cache->lock) + die("internal error: packed-refs not locked"); + write_or_die(packed_ref_cache->lock->fd, + PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER)); + + do_for_each_entry_in_dir(get_packed_ref_dir(packed_ref_cache), + 0, write_packed_entry_fn, + &packed_ref_cache->lock->fd); + if (commit_lock_file(packed_ref_cache->lock)) + error = -1; + packed_ref_cache->lock = NULL; + release_packed_ref_cache(packed_ref_cache); + return error; +} + +void rollback_packed_refs(void) { - struct repack_without_ref_sb data; - struct ref_cache *refs = get_ref_cache(NULL); - struct ref_dir *packed = get_packed_refs(refs); - if (find_ref(packed, refname) == NULL) + struct packed_ref_cache *packed_ref_cache = + get_packed_ref_cache(&ref_cache); + + if (!packed_ref_cache->lock) + die("internal error: packed-refs not locked"); + rollback_lock_file(packed_ref_cache->lock); + packed_ref_cache->lock = NULL; + release_packed_ref_cache(packed_ref_cache); + clear_packed_ref_cache(&ref_cache); +} + +struct ref_to_prune { + struct ref_to_prune *next; + unsigned char sha1[20]; + char name[FLEX_ARRAY]; +}; + +struct pack_refs_cb_data { + unsigned int flags; + struct ref_dir *packed_refs; + struct ref_to_prune *ref_to_prune; +}; + +/* + * An each_ref_entry_fn that is run over loose references only. If + * the loose reference can be packed, add an entry in the packed ref + * cache. If the reference should be pruned, also add it to + * ref_to_prune in the pack_refs_cb_data. 
+ */ +static int pack_if_possible_fn(struct ref_entry *entry, void *cb_data) +{ + struct pack_refs_cb_data *cb = cb_data; + enum peel_status peel_status; + struct ref_entry *packed_entry; + int is_tag_ref = !prefixcmp(entry->name, "refs/tags/"); + + /* ALWAYS pack tags */ + if (!(cb->flags & PACK_REFS_ALL) && !is_tag_ref) return 0; - data.refname = refname; - data.fd = hold_lock_file_for_update(&packlock, git_path("packed-refs"), 0); - if (data.fd < 0) { + + /* Do not pack symbolic or broken refs: */ + if ((entry->flag & REF_ISSYMREF) || !ref_resolves_to_object(entry)) + return 0; + + /* Add a packed ref cache entry equivalent to the loose entry. */ + peel_status = peel_entry(entry, 1); + if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG) + die("internal error peeling reference %s (%s)", + entry->name, sha1_to_hex(entry->u.value.sha1)); + packed_entry = find_ref(cb->packed_refs, entry->name); + if (packed_entry) { + /* Overwrite existing packed entry with info from loose entry */ + packed_entry->flag = REF_ISPACKED | REF_KNOWS_PEELED; + hashcpy(packed_entry->u.value.sha1, entry->u.value.sha1); + } else { + packed_entry = create_ref_entry(entry->name, entry->u.value.sha1, + REF_ISPACKED | REF_KNOWS_PEELED, 0); + add_ref(cb->packed_refs, packed_entry); + } + hashcpy(packed_entry->u.value.peeled, entry->u.value.peeled); + + /* Schedule the loose reference for pruning if requested. */ + if ((cb->flags & PACK_REFS_PRUNE)) { + int namelen = strlen(entry->name) + 1; + struct ref_to_prune *n = xcalloc(1, sizeof(*n) + namelen); + hashcpy(n->sha1, entry->u.value.sha1); + strcpy(n->name, entry->name); + n->next = cb->ref_to_prune; + cb->ref_to_prune = n; + } + return 0; +} + +/* + * Remove empty parents, but spare refs/ and immediate subdirs. + * Note: munges *name. + */ +static void try_remove_empty_parents(char *name) +{ + char *p, *q; + int i; + p = name; + for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */ + while (*p && *p != '/') + p++; + /* tolerate duplicate slashes; see check_refname_format() */ + while (*p == '/') + p++; + } + for (q = p; *q; q++) + ; + while (1) { + while (q > p && *q != '/') + q--; + while (q > p && *(q-1) == '/') + q--; + if (q == p) + break; + *q = '\0'; + if (rmdir(git_path("%s", name))) + break; + } +} + +/* make sure nobody touched the ref, and unlink */ +static void prune_ref(struct ref_to_prune *r) +{ + struct ref_lock *lock = lock_ref_sha1(r->name + 5, r->sha1); + + if (lock) { + unlink_or_warn(git_path("%s", r->name)); + unlock_ref(lock); + try_remove_empty_parents(r->name); + } +} + +static void prune_refs(struct ref_to_prune *r) +{ + while (r) { + prune_ref(r); + r = r->next; + } +} + +int pack_refs(unsigned int flags) +{ + struct pack_refs_cb_data cbdata; + + memset(&cbdata, 0, sizeof(cbdata)); + cbdata.flags = flags; + + lock_packed_refs(LOCK_DIE_ON_ERROR); + cbdata.packed_refs = get_packed_refs(&ref_cache); + + do_for_each_entry_in_dir(get_loose_refs(&ref_cache), 0, + pack_if_possible_fn, &cbdata); + + if (commit_packed_refs()) + die_errno("unable to overwrite old ref-pack file"); + + prune_refs(cbdata.ref_to_prune); + return 0; +} + +/* + * If entry is no longer needed in packed-refs, add it to the string + * list pointed to by cb_data. Reasons for deleting entries: + * + * - Entry is broken. + * - Entry is overridden by a loose ref. + * - Entry does not point at a valid object. + * + * In the first and third cases, also emit an error message because these + * are indications of repository corruption. 
+ */ +static int curate_packed_ref_fn(struct ref_entry *entry, void *cb_data) +{ + struct string_list *refs_to_delete = cb_data; + + if (entry->flag & REF_ISBROKEN) { + /* This shouldn't happen to packed refs. */ + error("%s is broken!", entry->name); + string_list_append(refs_to_delete, entry->name); + return 0; + } + if (!has_sha1_file(entry->u.value.sha1)) { + unsigned char sha1[20]; + int flags; + + if (read_ref_full(entry->name, sha1, 0, &flags)) + /* We should at least have found the packed ref. */ + die("Internal error"); + if ((flags & REF_ISSYMREF) || !(flags & REF_ISPACKED)) { + /* + * This packed reference is overridden by a + * loose reference, so it is OK that its value + * is no longer valid; for example, it might + * refer to an object that has been garbage + * collected. For this purpose we don't even + * care whether the loose reference itself is + * invalid, broken, symbolic, etc. Silently + * remove the packed reference. + */ + string_list_append(refs_to_delete, entry->name); + return 0; + } + /* + * There is no overriding loose reference, so the fact + * that this reference doesn't refer to a valid object + * indicates some kind of repository corruption. + * Report the problem, then omit the reference from + * the output. + */ + error("%s does not point to a valid object!", entry->name); + string_list_append(refs_to_delete, entry->name); + return 0; + } + + return 0; +} + +static int repack_without_ref(const char *refname) +{ + struct ref_dir *packed; + struct string_list refs_to_delete = STRING_LIST_INIT_DUP; + struct string_list_item *ref_to_delete; + + if (!get_packed_ref(refname)) + return 0; /* refname does not exist in packed refs */ + + if (lock_packed_refs(0)) { unable_to_lock_error(git_path("packed-refs"), errno); return error("cannot delete '%s' from packed refs", refname); } - clear_packed_ref_cache(refs); - packed = get_packed_refs(refs); - do_for_each_ref_in_dir(packed, 0, "", repack_without_ref_fn, 0, 0, &data); - return commit_lock_file(&packlock); + packed = get_packed_refs(&ref_cache); + + /* Remove refname from the cache: */ + if (remove_entry(packed, refname) == -1) { + /* + * The packed entry disappeared while we were + * acquiring the lock. 
+ */ + rollback_packed_refs(); + return 0; + } + + /* Remove any other accumulated cruft: */ + do_for_each_entry_in_dir(packed, 0, curate_packed_ref_fn, &refs_to_delete); + for_each_string_list_item(ref_to_delete, &refs_to_delete) { + if (remove_entry(packed, ref_to_delete->string) == -1) + die("internal error"); + } + + /* Write what remains: */ + return commit_packed_refs(); } int delete_ref(const char *refname, const unsigned char *sha1, int delopt) @@ -1823,7 +2478,7 @@ int delete_ref(const char *refname, const unsigned char *sha1, int delopt) ret |= repack_without_ref(lock->ref_name); unlink_or_warn(git_path("logs/%s", lock->ref_name)); - invalidate_ref_cache(NULL); + clear_loose_ref_cache(&ref_cache); unlock_ref(lock); return ret; } @@ -1845,7 +2500,6 @@ int rename_ref(const char *oldrefname, const char *newrefname, const char *logms struct stat loginfo; int log = !lstat(git_path("logs/%s", oldrefname), &loginfo); const char *symref = NULL; - struct ref_cache *refs = get_ref_cache(NULL); if (log && S_ISLNK(loginfo.st_mode)) return error("reflog for %s is a symlink", oldrefname); @@ -1857,10 +2511,10 @@ int rename_ref(const char *oldrefname, const char *newrefname, const char *logms if (!symref) return error("refname %s not found", oldrefname); - if (!is_refname_available(newrefname, oldrefname, get_packed_refs(refs))) + if (!is_refname_available(newrefname, oldrefname, get_packed_refs(&ref_cache))) return 1; - if (!is_refname_available(newrefname, oldrefname, get_loose_refs(refs))) + if (!is_refname_available(newrefname, oldrefname, get_loose_refs(&ref_cache))) return 1; if (log && rename(git_path("logs/%s", oldrefname), git_path(TMP_RENAMED_LOG))) @@ -2116,7 +2770,7 @@ int write_ref_sha1(struct ref_lock *lock, unlock_ref(lock); return -1; } - clear_loose_ref_cache(get_ref_cache(NULL)); + clear_loose_ref_cache(&ref_cache); if (log_ref_write(lock->ref_name, lock->old_sha1, sha1, logmsg) < 0 || (strcmp(lock->ref_name, lock->orig_ref_name) && log_ref_write(lock->orig_ref_name, lock->old_sha1, sha1, logmsg) < 0)) { |
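
The switch from returning a ref_entry pointer to returning an index in search_ref_dir() exists so that the new remove_entry() can splice the found entry out of its parent's entries[] array. Below is a minimal standalone sketch of that splice; struct dir and struct entry here are simplified stand-ins, not the refs.c types, but the memmove and the adjustment of the sorted watermark mirror the patch.

#include <stdio.h>
#include <string.h>

struct entry { const char *name; };

struct dir {
	struct entry **entries;   /* array of pointers to entries */
	int nr;                   /* number of valid entries */
	int sorted;               /* entries[0..sorted) are known sorted */
};

/*
 * Remove entries[idx] from dir and return the number of entries that
 * remain.  The caller still owns (and must free) the removed entry.
 */
static int splice_out(struct dir *dir, int idx)
{
	memmove(&dir->entries[idx], &dir->entries[idx + 1],
		(dir->nr - idx - 1) * sizeof(*dir->entries));
	dir->nr--;
	if (dir->sorted > idx)
		dir->sorted--;    /* the sorted prefix shrank by one */
	return dir->nr;
}

int main(void)
{
	struct entry a = { "refs/heads/maint" };
	struct entry b = { "refs/heads/master" };
	struct entry c = { "refs/heads/next" };
	struct entry *arr[] = { &a, &b, &c };
	struct dir dir = { arr, 3, 3 };

	splice_out(&dir, 1);      /* drop "refs/heads/master" */
	printf("%d left: %s, %s\n", dir.nr,
	       dir.entries[0]->name, dir.entries[1]->name);
	return 0;
}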
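The packed ref cache becomes a separately allocated, reference-counted object (struct packed_ref_cache with a referrers count) so that clear_packed_ref_cache() can drop the owning pointer while an iteration or a packed-refs lock still keeps the data alive. A minimal sketch of that acquire/release discipline, using illustrative names rather than the actual refs.c functions:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the patch's packed_ref_cache. */
struct refcounted_cache {
	unsigned int referrers;   /* live users, including the owning pointer */
	/* ... the cached ref entries would live here ... */
};

static struct refcounted_cache *cache_create(void)
{
	struct refcounted_cache *c = calloc(1, sizeof(*c));
	if (!c)
		abort();
	c->referrers = 1;         /* the owner's reference */
	return c;
}

static void cache_acquire(struct refcounted_cache *c)
{
	c->referrers++;
}

/* Drop one reference; free the cache and return 1 if it was the last. */
static int cache_release(struct refcounted_cache *c)
{
	if (--c->referrers)
		return 0;
	free(c);
	return 1;
}

int main(void)
{
	struct refcounted_cache *cache = cache_create(); /* referrers == 1 */

	cache_acquire(cache);   /* an iteration pins the cache: referrers == 2 */
	cache_release(cache);   /* owner invalidates its pointer: referrers == 1 */

	/* The iteration finishes; its release is the one that frees the data. */
	printf("freed on last release: %d\n", cache_release(cache));
	return 0;
}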
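resolve_ref_unsafe() gains a stat_ref: label: if readlink() or open() fails in a way that is inconsistent with what lstat() just reported (the file changed type underneath us), it loops back and re-stats instead of reporting an error. A self-contained sketch of the same retry shape; read_small_file() is a hypothetical helper invented for this example, not a git function:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/*
 * Read up to bufsz-1 bytes from path into buf (NUL-terminated).  If the
 * path is a symlink, return its target instead, read by hand.  If the
 * file changes type between the lstat() and the readlink()/open(),
 * start over from the lstat() rather than reporting an error.
 */
static ssize_t read_small_file(const char *path, char *buf, size_t bufsz)
{
	struct stat st;
	ssize_t len;
	int fd;

retry:
	if (lstat(path, &st) < 0)
		return -1;

	if (S_ISLNK(st.st_mode)) {
		len = readlink(path, buf, bufsz - 1);
		if (len < 0) {
			if (errno == ENOENT || errno == EINVAL)
				goto retry;   /* inconsistent with lstat(); retry */
			return -1;
		}
		buf[len] = '\0';
		return len;
	}

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT)
			goto retry;           /* inconsistent with lstat(); retry */
		return -1;
	}
	len = read(fd, buf, bufsz - 1);
	close(fd);
	if (len < 0)
		return -1;
	buf[len] = '\0';
	return len;
}

int main(int argc, char **argv)
{
	char buf[256];

	if (argc > 1 && read_small_file(argv[1], buf, sizeof(buf)) >= 0)
		puts(buf);
	return 0;
}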
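The magic number 42 in read_packed_refs() is now named PEELED_LINE_LENGTH: a peeled line in packed-refs is '^', 40 hexadecimal SHA-1 characters, and a newline, and write_packed_entry() checks that snprintf() produced exactly that many bytes. A small sketch of that check; format_peeled_line() is a hypothetical helper, not part of the patch:

#include <stdio.h>
#include <string.h>

/* '^', 40 hex digits, '\n' -- the length checked by write_packed_entry(). */
#define PEELED_LINE_LENGTH 42

/* Hypothetical helper: format one packed-refs peeled line into buf. */
static int format_peeled_line(char *buf, size_t bufsz, const char *hex40)
{
	int len = snprintf(buf, bufsz, "^%s\n", hex40);
	return len == PEELED_LINE_LENGTH ? 0 : -1;
}

int main(void)
{
	char line[64];
	/* Any 40-character hex object name will do for the demonstration. */
	const char *hex = "44c47952479f7d937dfa584b0958bc96fb0fb466";

	if (format_peeled_line(line, sizeof(line), hex))
		return 1;
	printf("%zu\n", strlen(line));   /* prints 42 */
	return 0;
}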