Diffstat (limited to 'refs/files-backend.c')
-rw-r--r-- | refs/files-backend.c | 737
1 file changed, 476 insertions(+), 261 deletions(-)
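
The first hunk of the diff below adds a block of backend-internal flag bits that live in ref_update::flags alongside the public flags from refs.h, and a later hunk adds a BUG() guard in files_transaction_prepare() requiring that REF_IS_PRUNING only ever appear together with REF_NO_DEREF. As a quick standalone illustration (not part of the patch), the sketch below reuses the bit values from that hunk; the value 0x01 for REF_NO_DEREF is an assumption about refs.h, which this diff does not show.

#include <stdio.h>

/* Bit values copied from the hunk below; REF_NO_DEREF assumed to be 0x01. */
#define REF_NO_DEREF	0x01
#define REF_IS_PRUNING	(1 << 4)
#define REF_DELETING	(1 << 5)
#define REF_LOG_ONLY	(1 << 7)

/* Mirrors the guard added in files_transaction_prepare(). */
static int pruning_flags_ok(unsigned int flags)
{
	return !(flags & REF_IS_PRUNING) || (flags & REF_NO_DEREF);
}

int main(void)
{
	unsigned int prune = REF_NO_DEREF | REF_IS_PRUNING | REF_DELETING;
	unsigned int broken = REF_IS_PRUNING | REF_LOG_ONLY;

	printf("prune:  %s\n", pruning_flags_ok(prune) ? "ok" : "would BUG()");
	printf("broken: %s\n", pruning_flags_ok(broken) ? "ok" : "would BUG()");
	return 0;
}
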
diff --git a/refs/files-backend.c b/refs/files-backend.c index fccbc24ac4..bec8e30e9e 100644 --- a/refs/files-backend.c +++ b/refs/files-backend.c @@ -10,9 +10,54 @@ #include "../object.h" #include "../dir.h" +/* + * This backend uses the following flags in `ref_update::flags` for + * internal bookkeeping purposes. Their numerical values must not + * conflict with REF_NO_DEREF, REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW, + * REF_HAVE_OLD, or REF_IS_PRUNING, which are also stored in + * `ref_update::flags`. + */ + +/* + * Used as a flag in ref_update::flags when a loose ref is being + * pruned. This flag must only be used when REF_NO_DEREF is set. + */ +#define REF_IS_PRUNING (1 << 4) + +/* + * Flag passed to lock_ref_sha1_basic() telling it to tolerate broken + * refs (i.e., because the reference is about to be deleted anyway). + */ +#define REF_DELETING (1 << 5) + +/* + * Used as a flag in ref_update::flags when the lockfile needs to be + * committed. + */ +#define REF_NEEDS_COMMIT (1 << 6) + +/* + * Used as a flag in ref_update::flags when we want to log a ref + * update but not actually perform it. This is used when a symbolic + * ref update is split up. + */ +#define REF_LOG_ONLY (1 << 7) + +/* + * Used as a flag in ref_update::flags when the ref_update was via an + * update to HEAD. + */ +#define REF_UPDATE_VIA_HEAD (1 << 8) + +/* + * Used as a flag in ref_update::flags when the loose reference has + * been deleted. + */ +#define REF_DELETED_LOOSE (1 << 9) + struct ref_lock { char *ref_name; - struct lock_file *lk; + struct lock_file lk; struct object_id old_oid; }; @@ -106,15 +151,6 @@ static void files_reflog_path(struct files_ref_store *refs, struct strbuf *sb, const char *refname) { - if (!refname) { - /* - * FIXME: of course this is wrong in multi worktree - * setting. To be fixed real soon. - */ - strbuf_addf(sb, "%s/logs", refs->gitcommondir); - return; - } - switch (ref_type(refname)) { case REF_TYPE_PER_WORKTREE: case REF_TYPE_PSEUDOREF: @@ -198,13 +234,13 @@ static void loose_fill_ref_dir(struct ref_store *ref_store, if (!refs_resolve_ref_unsafe(&refs->base, refname.buf, RESOLVE_REF_READING, - oid.hash, &flag)) { + &oid, &flag)) { oidclr(&oid); flag |= REF_ISBROKEN; } else if (is_null_oid(&oid)) { /* * It is so astronomically unlikely - * that NULL_SHA1 is the SHA-1 of an + * that null_oid is the OID of an * actual object that we consider its * appearance in a loose reference * file to be repo corruption @@ -270,7 +306,7 @@ static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs) } static int files_read_raw_ref(struct ref_store *ref_store, - const char *refname, unsigned char *sha1, + const char *refname, struct object_id *oid, struct strbuf *referent, unsigned int *type) { struct files_ref_store *refs = @@ -279,6 +315,7 @@ static int files_read_raw_ref(struct ref_store *ref_store, struct strbuf sb_path = STRBUF_INIT; const char *path; const char *buf; + const char *p; struct stat st; int fd; int ret = -1; @@ -313,7 +350,7 @@ stat_ref: if (errno != ENOENT) goto out; if (refs_read_raw_ref(refs->packed_ref_store, refname, - sha1, referent, type)) { + oid, referent, type)) { errno = ENOENT; goto out; } @@ -353,7 +390,7 @@ stat_ref: * packed ref: */ if (refs_read_raw_ref(refs->packed_ref_store, refname, - sha1, referent, type)) { + oid, referent, type)) { errno = EISDIR; goto out; } @@ -399,8 +436,8 @@ stat_ref: * Please note that FETCH_HEAD has additional * data after the sha. 
*/ - if (get_sha1_hex(buf, sha1) || - (buf[40] != '\0' && !isspace(buf[40]))) { + if (parse_oid_hex(buf, oid, &p) || + (*p != '\0' && !isspace(*p))) { *type |= REF_ISBROKEN; errno = EINVAL; goto out; @@ -418,9 +455,7 @@ out: static void unlock_ref(struct ref_lock *lock) { - /* Do not free lock->lk -- atexit() still looks at them */ - if (lock->lk) - rollback_lock_file(lock->lk); + rollback_lock_file(&lock->lk); free(lock->ref_name); free(lock); } @@ -438,7 +473,7 @@ static void unlock_ref(struct ref_lock *lock) * are passed to refs_verify_refname_available() for this check. * * If mustexist is not set and the reference is not found or is - * broken, lock the reference anyway but clear sha1. + * broken, lock the reference anyway but clear old_oid. * * Return 0 on success. On failure, write an error message to err and * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR. @@ -534,11 +569,8 @@ retry: goto error_return; } - if (!lock->lk) - lock->lk = xcalloc(1, sizeof(struct lock_file)); - if (hold_lock_file_for_update_timeout( - lock->lk, ref_file.buf, LOCK_NO_DEREF, + &lock->lk, ref_file.buf, LOCK_NO_DEREF, get_files_ref_lock_timeout_ms()) < 0) { if (errno == ENOENT && --attempts_remaining > 0) { /* @@ -559,7 +591,7 @@ retry: */ if (files_read_raw_ref(&refs->base, refname, - lock->old_oid.hash, referent, type)) { + &lock->old_oid, referent, type)) { if (errno == ENOENT) { if (mustexist) { /* Garden variety missing reference. */ @@ -655,43 +687,6 @@ out: return ret; } -static int files_peel_ref(struct ref_store *ref_store, - const char *refname, unsigned char *sha1) -{ - struct files_ref_store *refs = - files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB, - "peel_ref"); - int flag; - unsigned char base[20]; - - if (current_ref_iter && current_ref_iter->refname == refname) { - struct object_id peeled; - - if (ref_iterator_peel(current_ref_iter, &peeled)) - return -1; - hashcpy(sha1, peeled.hash); - return 0; - } - - if (refs_read_ref_full(ref_store, refname, - RESOLVE_REF_READING, base, &flag)) - return -1; - - /* - * If the reference is packed, read its ref_entry from the - * cache in the hope that we already know its peeled value. - * We only try this optimization on packed references because - * (a) forcing the filling of the loose reference cache could - * be expensive and (b) loose references anyway usually do not - * have REF_KNOWS_PEELED. 
- */ - if (flag & REF_ISPACKED && - !refs_peel_ref(refs->packed_ref_store, refname, sha1)) - return 0; - - return peel_object(base, sha1); -} - struct files_ref_iterator { struct ref_iterator base; @@ -762,7 +757,7 @@ static struct ref_iterator *files_ref_iterator_begin( const char *prefix, unsigned int flags) { struct files_ref_store *refs; - struct ref_iterator *loose_iter, *packed_iter; + struct ref_iterator *loose_iter, *packed_iter, *overlay_iter; struct files_ref_iterator *iter; struct ref_iterator *ref_iterator; unsigned int required_flags = REF_STORE_READ; @@ -772,10 +767,6 @@ static struct ref_iterator *files_ref_iterator_begin( refs = files_downcast(ref_store, required_flags, "ref_iterator_begin"); - iter = xcalloc(1, sizeof(*iter)); - ref_iterator = &iter->base; - base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable); - /* * We must make sure that all loose refs are read before * accessing the packed-refs file; this avoids a race @@ -811,28 +802,34 @@ static struct ref_iterator *files_ref_iterator_begin( refs->packed_ref_store, prefix, 0, DO_FOR_EACH_INCLUDE_BROKEN); - iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter); + overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter); + + iter = xcalloc(1, sizeof(*iter)); + ref_iterator = &iter->base; + base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable, + overlay_iter->ordered); + iter->iter0 = overlay_iter; iter->flags = flags; return ref_iterator; } /* - * Verify that the reference locked by lock has the value old_sha1. - * Fail if the reference doesn't exist and mustexist is set. Return 0 - * on success. On error, write an error message to err, set errno, and - * return a negative value. + * Verify that the reference locked by lock has the value old_oid + * (unless it is NULL). Fail if the reference doesn't exist and + * mustexist is set. Return 0 on success. On error, write an error + * message to err, set errno, and return a negative value. */ static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock, - const unsigned char *old_sha1, int mustexist, + const struct object_id *old_oid, int mustexist, struct strbuf *err) { assert(err); if (refs_read_ref_full(ref_store, lock->ref_name, mustexist ? RESOLVE_REF_READING : 0, - lock->old_oid.hash, NULL)) { - if (old_sha1) { + &lock->old_oid, NULL)) { + if (old_oid) { int save_errno = errno; strbuf_addf(err, "can't verify ref '%s'", lock->ref_name); errno = save_errno; @@ -842,11 +839,11 @@ static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock, return 0; } } - if (old_sha1 && hashcmp(lock->old_oid.hash, old_sha1)) { + if (old_oid && oidcmp(&lock->old_oid, old_oid)) { strbuf_addf(err, "ref '%s' is at %s but expected %s", lock->ref_name, oid_to_hex(&lock->old_oid), - sha1_to_hex(old_sha1)); + oid_to_hex(old_oid)); errno = EBUSY; return -1; } @@ -876,22 +873,22 @@ static int create_reflock(const char *path, void *cb) * Locks a ref returning the lock on success and NULL on failure. * On failure errno is set to something meaningful. 
*/ -static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs, - const char *refname, - const unsigned char *old_sha1, - const struct string_list *extras, - const struct string_list *skip, - unsigned int flags, int *type, - struct strbuf *err) +static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs, + const char *refname, + const struct object_id *old_oid, + const struct string_list *extras, + const struct string_list *skip, + unsigned int flags, int *type, + struct strbuf *err) { struct strbuf ref_file = STRBUF_INIT; struct ref_lock *lock; int last_errno = 0; - int mustexist = (old_sha1 && !is_null_sha1(old_sha1)); + int mustexist = (old_oid && !is_null_oid(old_oid)); int resolve_flags = RESOLVE_REF_NO_RECURSE; int resolved; - files_assert_main_repository(refs, "lock_ref_sha1_basic"); + files_assert_main_repository(refs, "lock_ref_oid_basic"); assert(err); lock = xcalloc(1, sizeof(struct ref_lock)); @@ -904,7 +901,7 @@ static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs, files_ref_path(refs, &ref_file, refname); resolved = !!refs_resolve_ref_unsafe(&refs->base, refname, resolve_flags, - lock->old_oid.hash, type); + &lock->old_oid, type); if (!resolved && errno == EISDIR) { /* * we are trying to lock foo but we used to @@ -923,7 +920,7 @@ static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs, } resolved = !!refs_resolve_ref_unsafe(&refs->base, refname, resolve_flags, - lock->old_oid.hash, type); + &lock->old_oid, type); } if (!resolved) { last_errno = errno; @@ -949,17 +946,15 @@ static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs, goto error_return; } - lock->lk = xcalloc(1, sizeof(struct lock_file)); - lock->ref_name = xstrdup(refname); - if (raceproof_create_file(ref_file.buf, create_reflock, lock->lk)) { + if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) { last_errno = errno; unable_to_lock_message(ref_file.buf, errno, err); goto error_return; } - if (verify_lock(&refs->base, lock, old_sha1, mustexist, err)) { + if (verify_lock(&refs->base, lock, old_oid, mustexist, err)) { last_errno = errno; goto error_return; } @@ -977,7 +972,7 @@ static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs, struct ref_to_prune { struct ref_to_prune *next; - unsigned char sha1[20]; + struct object_id oid; char name[FLEX_ARRAY]; }; @@ -1039,29 +1034,42 @@ static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r) { struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; + int ret = -1; if (check_refname_format(r->name, 0)) return; transaction = ref_store_transaction_begin(&refs->base, &err); - if (!transaction || - ref_transaction_delete(transaction, r->name, r->sha1, - REF_ISPRUNING | REF_NODEREF, NULL, &err) || - ref_transaction_commit(transaction, &err)) { - ref_transaction_free(transaction); + if (!transaction) + goto cleanup; + ref_transaction_add_update( + transaction, r->name, + REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING, + &null_oid, &r->oid, NULL); + if (ref_transaction_commit(transaction, &err)) + goto cleanup; + + ret = 0; + +cleanup: + if (ret) error("%s", err.buf); - strbuf_release(&err); - return; - } - ref_transaction_free(transaction); strbuf_release(&err); + ref_transaction_free(transaction); + return; } -static void prune_refs(struct files_ref_store *refs, struct ref_to_prune *r) +/* + * Prune the loose versions of the references in the linked list + * `*refs_to_prune`, freeing the entries in the list as we go. 
+ */ +static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune) { - while (r) { + while (*refs_to_prune) { + struct ref_to_prune *r = *refs_to_prune; + *refs_to_prune = r->next; prune_ref(refs, r); - r = r->next; + free(r); } } @@ -1100,6 +1108,11 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags) int ok; struct ref_to_prune *refs_to_prune = NULL; struct strbuf err = STRBUF_INIT; + struct ref_transaction *transaction; + + transaction = ref_store_transaction_begin(refs->packed_ref_store, &err); + if (!transaction) + return -1; packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err); @@ -1115,18 +1128,20 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags) continue; /* - * Create an entry in the packed-refs cache equivalent - * to the one from the loose ref cache, except that - * we don't copy the peeled status, because we want it - * to be re-peeled. + * Add a reference creation for this reference to the + * packed-refs transaction: */ - add_packed_ref(refs->packed_ref_store, iter->refname, iter->oid); + if (ref_transaction_update(transaction, iter->refname, + iter->oid, NULL, + REF_NO_DEREF, NULL, &err)) + die("failure preparing to create packed reference %s: %s", + iter->refname, err.buf); /* Schedule the loose reference for pruning if requested. */ if ((flags & PACK_REFS_PRUNE)) { struct ref_to_prune *n; FLEX_ALLOC_STR(n, name, iter->refname); - hashcpy(n->sha1, iter->oid->hash); + oidcpy(&n->oid, iter->oid); n->next = refs_to_prune; refs_to_prune = n; } @@ -1134,11 +1149,14 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags) if (ok != ITER_DONE) die("error while iterating over references"); - if (commit_packed_refs(refs->packed_ref_store, &err)) - die("unable to overwrite old ref-pack file: %s", err.buf); + if (ref_transaction_commit(transaction, &err)) + die("unable to write new packed-refs: %s", err.buf); + + ref_transaction_free(transaction); + packed_refs_unlock(refs->packed_ref_store); - prune_refs(refs, refs_to_prune); + prune_refs(refs, &refs_to_prune); strbuf_release(&err); return 0; } @@ -1157,7 +1175,7 @@ static int files_delete_refs(struct ref_store *ref_store, const char *msg, if (packed_refs_lock(refs->packed_ref_store, 0, &err)) goto error; - if (repack_without_refs(refs->packed_ref_store, refnames, &err)) { + if (refs_delete_refs(refs->packed_ref_store, msg, refnames, flags)) { packed_refs_unlock(refs->packed_ref_store); goto error; } @@ -1258,9 +1276,9 @@ static int commit_ref_update(struct files_ref_store *refs, const struct object_id *oid, const char *logmsg, struct strbuf *err); -static int files_rename_ref(struct ref_store *ref_store, +static int files_copy_or_rename_ref(struct ref_store *ref_store, const char *oldrefname, const char *newrefname, - const char *logmsg) + const char *logmsg, int copy) { struct files_ref_store *refs = files_downcast(ref_store, REF_STORE_WRITE, "rename_ref"); @@ -1286,14 +1304,18 @@ static int files_rename_ref(struct ref_store *ref_store, if (!refs_resolve_ref_unsafe(&refs->base, oldrefname, RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE, - orig_oid.hash, &flag)) { + &orig_oid, &flag)) { ret = error("refname %s not found", oldrefname); goto out; } if (flag & REF_ISSYMREF) { - ret = error("refname %s is a symbolic ref, renaming it is not supported", - oldrefname); + if (copy) + ret = error("refname %s is a symbolic ref, copying it is not supported", + oldrefname); + else + ret = error("refname %s is a symbolic ref, 
renaming it is not supported", + oldrefname); goto out; } if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) { @@ -1301,14 +1323,20 @@ static int files_rename_ref(struct ref_store *ref_store, goto out; } - if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) { + if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) { ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s", oldrefname, strerror(errno)); goto out; } - if (refs_delete_ref(&refs->base, logmsg, oldrefname, - orig_oid.hash, REF_NODEREF)) { + if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) { + ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s", + oldrefname, strerror(errno)); + goto out; + } + + if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname, + &orig_oid, REF_NO_DEREF)) { error("unable to delete old %s", oldrefname); goto rollback; } @@ -1320,11 +1348,11 @@ static int files_rename_ref(struct ref_store *ref_store, * the safety anyway; we want to delete the reference whatever * its current value. */ - if (!refs_read_ref_full(&refs->base, newrefname, + if (!copy && !refs_read_ref_full(&refs->base, newrefname, RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE, - oid.hash, NULL) && + &oid, NULL) && refs_delete_ref(&refs->base, NULL, newrefname, - NULL, REF_NODEREF)) { + NULL, REF_NO_DEREF)) { if (errno == EISDIR) { struct strbuf path = STRBUF_INIT; int result; @@ -1348,10 +1376,13 @@ static int files_rename_ref(struct ref_store *ref_store, logmoved = log; - lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL, - REF_NODEREF, NULL, &err); + lock = lock_ref_oid_basic(refs, newrefname, NULL, NULL, NULL, + REF_NO_DEREF, NULL, &err); if (!lock) { - error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf); + if (copy) + error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf); + else + error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf); strbuf_release(&err); goto rollback; } @@ -1368,8 +1399,8 @@ static int files_rename_ref(struct ref_store *ref_store, goto out; rollback: - lock = lock_ref_sha1_basic(refs, oldrefname, NULL, NULL, NULL, - REF_NODEREF, NULL, &err); + lock = lock_ref_oid_basic(refs, oldrefname, NULL, NULL, NULL, + REF_NO_DEREF, NULL, &err); if (!lock) { error("unable to lock %s for rollback: %s", oldrefname, err.buf); strbuf_release(&err); @@ -1402,16 +1433,32 @@ static int files_rename_ref(struct ref_store *ref_store, return ret; } -static int close_ref(struct ref_lock *lock) +static int files_rename_ref(struct ref_store *ref_store, + const char *oldrefname, const char *newrefname, + const char *logmsg) { - if (close_lock_file(lock->lk)) + return files_copy_or_rename_ref(ref_store, oldrefname, + newrefname, logmsg, 0); +} + +static int files_copy_ref(struct ref_store *ref_store, + const char *oldrefname, const char *newrefname, + const char *logmsg) +{ + return files_copy_or_rename_ref(ref_store, oldrefname, + newrefname, logmsg, 1); +} + +static int close_ref_gently(struct ref_lock *lock) +{ + if (close_lock_file_gently(&lock->lk)) return -1; return 0; } static int commit_ref(struct ref_lock *lock) { - char *path = get_locked_file_path(lock->lk); + char *path = get_locked_file_path(&lock->lk); struct stat st; if (!lstat(path, &st) && S_ISDIR(st.st_mode)) { @@ -1435,7 +1482,7 @@ static int commit_ref(struct ref_lock *lock) free(path); } - if (commit_lock_file(lock->lk)) + if (commit_lock_file(&lock->lk)) return -1; return 0; } @@ -1549,7 +1596,7 @@ 
static int log_ref_write_fd(int fd, const struct object_id *old_oid, written = len <= maxlen ? write_in_full(fd, logrec, len) : -1; free(logrec); - if (written != len) + if (written < 0) return -1; return 0; @@ -1601,9 +1648,8 @@ static int files_log_ref_write(struct files_ref_store *refs, } /* - * Write sha1 into the open lockfile, then close the lockfile. On - * errors, rollback the lockfile, fill in *err and - * return -1. + * Write oid into the open lockfile, then close the lockfile. On + * errors, rollback the lockfile, fill in *err and return -1. */ static int write_ref_to_lockfile(struct ref_lock *lock, const struct object_id *oid, struct strbuf *err) @@ -1627,12 +1673,12 @@ static int write_ref_to_lockfile(struct ref_lock *lock, unlock_ref(lock); return -1; } - fd = get_lock_file_fd(lock->lk); - if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ || - write_in_full(fd, &term, 1) != 1 || - close_ref(lock) < 0) { + fd = get_lock_file_fd(&lock->lk); + if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) < 0 || + write_in_full(fd, &term, 1) < 0 || + close_ref_gently(lock) < 0) { strbuf_addf(err, - "couldn't write '%s'", get_lock_file_path(lock->lk)); + "couldn't write '%s'", get_lock_file_path(&lock->lk)); unlock_ref(lock); return -1; } @@ -1676,13 +1722,12 @@ static int commit_ref_update(struct files_ref_store *refs, * check with HEAD only which should cover 99% of all usage * scenarios (even 100% of the default ones). */ - struct object_id head_oid; int head_flag; const char *head_ref; head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD", RESOLVE_REF_READING, - head_oid.hash, &head_flag); + NULL, &head_flag); if (head_ref && (head_flag & REF_ISSYMREF) && !strcmp(head_ref, lock->ref_name)) { struct strbuf log_err = STRBUF_INIT; @@ -1709,7 +1754,7 @@ static int create_ref_symlink(struct ref_lock *lock, const char *target) { int ret = -1; #ifndef NO_SYMLINK_HEAD - char *ref_path = get_locked_file_path(lock->lk); + char *ref_path = get_locked_file_path(&lock->lk); unlink(ref_path); ret = symlink(target, ref_path); free(ref_path); @@ -1728,7 +1773,7 @@ static void update_symref_reflog(struct files_ref_store *refs, struct object_id new_oid; if (logmsg && !refs_read_ref_full(&refs->base, target, - RESOLVE_REF_READING, new_oid.hash, NULL) && + RESOLVE_REF_READING, &new_oid, NULL) && files_log_ref_write(refs, refname, &lock->old_oid, &new_oid, logmsg, 0, &err)) { error("%s", err.buf); @@ -1745,14 +1790,14 @@ static int create_symref_locked(struct files_ref_store *refs, return 0; } - if (!fdopen_lock_file(lock->lk, "w")) + if (!fdopen_lock_file(&lock->lk, "w")) return error("unable to fdopen %s: %s", - lock->lk->tempfile.filename.buf, strerror(errno)); + lock->lk.tempfile->filename.buf, strerror(errno)); update_symref_reflog(refs, lock, refname, target, logmsg); /* no error check; commit_ref will check ferror */ - fprintf(lock->lk->tempfile.fp, "ref: %s\n", target); + fprintf(lock->lk.tempfile->fp, "ref: %s\n", target); if (commit_ref(lock) < 0) return error("unable to write symref for %s: %s", refname, strerror(errno)); @@ -1769,9 +1814,9 @@ static int files_create_symref(struct ref_store *ref_store, struct ref_lock *lock; int ret; - lock = lock_ref_sha1_basic(refs, refname, NULL, - NULL, NULL, REF_NODEREF, NULL, - &err); + lock = lock_ref_oid_basic(refs, refname, NULL, + NULL, NULL, REF_NO_DEREF, NULL, + &err); if (!lock) { error("%s", err.buf); strbuf_release(&err); @@ -2017,7 +2062,7 @@ static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator) if 
(refs_read_ref_full(iter->ref_store, diter->relative_path, 0, - iter->oid.hash, &flags)) { + &iter->oid, &flags)) { error("bad ref for %s", diter->path.buf); continue; } @@ -2059,23 +2104,64 @@ static struct ref_iterator_vtable files_reflog_iterator_vtable = { files_reflog_iterator_abort }; -static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store) +static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store, + const char *gitdir) { - struct files_ref_store *refs = - files_downcast(ref_store, REF_STORE_READ, - "reflog_iterator_begin"); struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter)); struct ref_iterator *ref_iterator = &iter->base; struct strbuf sb = STRBUF_INIT; - base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable); - files_reflog_path(refs, &sb, NULL); + base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0); + strbuf_addf(&sb, "%s/logs", gitdir); iter->dir_iterator = dir_iterator_begin(sb.buf); iter->ref_store = ref_store; strbuf_release(&sb); + return ref_iterator; } +static enum iterator_selection reflog_iterator_select( + struct ref_iterator *iter_worktree, + struct ref_iterator *iter_common, + void *cb_data) +{ + if (iter_worktree) { + /* + * We're a bit loose here. We probably should ignore + * common refs if they are accidentally added as + * per-worktree refs. + */ + return ITER_SELECT_0; + } else if (iter_common) { + if (ref_type(iter_common->refname) == REF_TYPE_NORMAL) + return ITER_SELECT_1; + + /* + * The main ref store may contain main worktree's + * per-worktree refs, which should be ignored + */ + return ITER_SKIP_1; + } else + return ITER_DONE; +} + +static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store) +{ + struct files_ref_store *refs = + files_downcast(ref_store, REF_STORE_READ, + "reflog_iterator_begin"); + + if (!strcmp(refs->gitdir, refs->gitcommondir)) { + return reflog_iterator_begin(ref_store, refs->gitcommondir); + } else { + return merge_ref_iterator_begin( + 0, + reflog_iterator_begin(ref_store, refs->gitdir), + reflog_iterator_begin(ref_store, refs->gitcommondir), + reflog_iterator_select, refs); + } +} + /* * If update is a direct update of head_ref (the reference pointed to * by HEAD), then add an extra REF_LOG_ONLY update for HEAD. @@ -2090,7 +2176,7 @@ static int split_head_update(struct ref_update *update, struct ref_update *new_update; if ((update->flags & REF_LOG_ONLY) || - (update->flags & REF_ISPRUNING) || + (update->flags & REF_IS_PRUNING) || (update->flags & REF_UPDATE_VIA_HEAD)) return 0; @@ -2099,11 +2185,10 @@ static int split_head_update(struct ref_update *update, /* * First make sure that HEAD is not already in the - * transaction. This insertion is O(N) in the transaction + * transaction. This check is O(lg N) in the transaction * size, but it happens at most once per transaction. */ - item = string_list_insert(affected_refnames, "HEAD"); - if (item->util) { + if (string_list_has_string(affected_refnames, "HEAD")) { /* An entry already existed */ strbuf_addf(err, "multiple updates for 'HEAD' (including one " @@ -2114,10 +2199,18 @@ static int split_head_update(struct ref_update *update, new_update = ref_transaction_add_update( transaction, "HEAD", - update->flags | REF_LOG_ONLY | REF_NODEREF, - update->new_oid.hash, update->old_oid.hash, + update->flags | REF_LOG_ONLY | REF_NO_DEREF, + &update->new_oid, &update->old_oid, update->msg); + /* + * Add "HEAD". 
This insertion is O(N) in the transaction + * size, but it happens at most once per transaction. + * Add new_update->refname instead of a literal "HEAD". + */ + if (strcmp(new_update->refname, "HEAD")) + BUG("%s unexpectedly not 'HEAD'", new_update->refname); + item = string_list_insert(affected_refnames, new_update->refname); item->util = new_update; return 0; @@ -2125,8 +2218,8 @@ static int split_head_update(struct ref_update *update, /* * update is for a symref that points at referent and doesn't have - * REF_NODEREF set. Split it into two updates: - * - The original update, but with REF_LOG_ONLY and REF_NODEREF set + * REF_NO_DEREF set. Split it into two updates: + * - The original update, but with REF_LOG_ONLY and REF_NO_DEREF set * - A new, separate update for the referent reference * Note that the new update will itself be subject to splitting when * the iteration gets to it. @@ -2144,13 +2237,12 @@ static int split_symref_update(struct files_ref_store *refs, /* * First make sure that referent is not already in the - * transaction. This insertion is O(N) in the transaction + * transaction. This check is O(lg N) in the transaction * size, but it happens at most once per symref in a * transaction. */ - item = string_list_insert(affected_refnames, referent); - if (item->util) { - /* An entry already existed */ + if (string_list_has_string(affected_refnames, referent)) { + /* An entry already exists */ strbuf_addf(err, "multiple updates for '%s' (including one " "via symref '%s') are not allowed", @@ -2172,19 +2264,30 @@ static int split_symref_update(struct files_ref_store *refs, new_update = ref_transaction_add_update( transaction, referent, new_flags, - update->new_oid.hash, update->old_oid.hash, + &update->new_oid, &update->old_oid, update->msg); new_update->parent_update = update; /* * Change the symbolic ref update to log only. Also, it - * doesn't need to check its old SHA-1 value, as that will be + * doesn't need to check its old OID value, as that will be * done when new_update is processed. */ - update->flags |= REF_LOG_ONLY | REF_NODEREF; + update->flags |= REF_LOG_ONLY | REF_NO_DEREF; update->flags &= ~REF_HAVE_OLD; + /* + * Add the referent. This insertion is O(N) in the transaction + * size, but it happens at most once per symref in a + * transaction. Make sure to add new_update->refname, which will + * be valid as long as affected_refnames is in use, and NOT + * referent, which might soon be freed by our caller. + */ + item = string_list_insert(affected_refnames, new_update->refname); + if (item->util) + BUG("%s unexpectedly found in affected_refnames", + new_update->refname); item->util = new_update; return 0; @@ -2237,10 +2340,10 @@ static int check_old_oid(struct ref_update *update, struct object_id *oid, * Prepare for carrying out update: * - Lock the reference referred to by update. * - Read the reference under lock. - * - Check that its old SHA-1 value (if specified) is correct, and in + * - Check that its old OID value (if specified) is correct, and in * any case record it in update->lock->old_oid for later use when * writing the reflog. - * - If it is a symref update without REF_NODEREF, split it up into a + * - If it is a symref update without REF_NO_DEREF, split it up into a * REF_LOG_ONLY update of the symref and add a separate update for * the referent to transaction. 
* - If it is an update of head_ref, add a corresponding REF_LOG_ONLY @@ -2256,7 +2359,7 @@ static int lock_ref_for_update(struct files_ref_store *refs, struct strbuf referent = STRBUF_INIT; int mustexist = (update->flags & REF_HAVE_OLD) && !is_null_oid(&update->old_oid); - int ret; + int ret = 0; struct ref_lock *lock; files_assert_main_repository(refs, "lock_ref_for_update"); @@ -2268,7 +2371,7 @@ static int lock_ref_for_update(struct files_ref_store *refs, ret = split_head_update(update, transaction, head_ref, affected_refnames, err); if (ret) - return ret; + goto out; } ret = lock_raw_ref(refs, update->refname, mustexist, @@ -2282,35 +2385,37 @@ static int lock_ref_for_update(struct files_ref_store *refs, strbuf_addf(err, "cannot lock ref '%s': %s", original_update_refname(update), reason); free(reason); - return ret; + goto out; } update->backend_data = lock; if (update->type & REF_ISSYMREF) { - if (update->flags & REF_NODEREF) { + if (update->flags & REF_NO_DEREF) { /* * We won't be reading the referent as part of * the transaction, so we have to read it here - * to record and possibly check old_sha1: + * to record and possibly check old_oid: */ if (refs_read_ref_full(&refs->base, referent.buf, 0, - lock->old_oid.hash, NULL)) { + &lock->old_oid, NULL)) { if (update->flags & REF_HAVE_OLD) { strbuf_addf(err, "cannot lock ref '%s': " "error reading reference", original_update_refname(update)); - return -1; + ret = TRANSACTION_GENERIC_ERROR; + goto out; } } else if (check_old_oid(update, &lock->old_oid, err)) { - return TRANSACTION_GENERIC_ERROR; + ret = TRANSACTION_GENERIC_ERROR; + goto out; } } else { /* * Create a new update for the reference this * symref is pointing at. Also, we will record - * and verify old_sha1 for this update as part + * and verify old_oid for this update as part * of processing the split-off update, so we * don't have to do it here. */ @@ -2318,17 +2423,19 @@ static int lock_ref_for_update(struct files_ref_store *refs, referent.buf, transaction, affected_refnames, err); if (ret) - return ret; + goto out; } } else { struct ref_update *parent_update; - if (check_old_oid(update, &lock->old_oid, err)) - return TRANSACTION_GENERIC_ERROR; + if (check_old_oid(update, &lock->old_oid, err)) { + ret = TRANSACTION_GENERIC_ERROR; + goto out; + } /* * If this update is happening indirectly because of a - * symref update, record the old SHA-1 in the parent + * symref update, record the old OID in the parent * update: */ for (parent_update = update->parent_update; @@ -2361,7 +2468,8 @@ static int lock_ref_for_update(struct files_ref_store *refs, "cannot update ref '%s': %s", update->refname, write_err); free(write_err); - return TRANSACTION_GENERIC_ERROR; + ret = TRANSACTION_GENERIC_ERROR; + goto out; } else { update->flags |= REF_NEEDS_COMMIT; } @@ -2372,22 +2480,35 @@ static int lock_ref_for_update(struct files_ref_store *refs, * the lockfile is still open. Close it to * free up the file descriptor: */ - if (close_ref(lock)) { + if (close_ref_gently(lock)) { strbuf_addf(err, "couldn't close '%s.lock'", update->refname); - return TRANSACTION_GENERIC_ERROR; + ret = TRANSACTION_GENERIC_ERROR; + goto out; } } - return 0; + +out: + strbuf_release(&referent); + return ret; } +struct files_transaction_backend_data { + struct ref_transaction *packed_transaction; + int packed_refs_locked; +}; + /* * Unlock any references in `transaction` that are still locked, and * mark the transaction closed. 
*/ -static void files_transaction_cleanup(struct ref_transaction *transaction) +static void files_transaction_cleanup(struct files_ref_store *refs, + struct ref_transaction *transaction) { size_t i; + struct files_transaction_backend_data *backend_data = + transaction->backend_data; + struct strbuf err = STRBUF_INIT; for (i = 0; i < transaction->nr; i++) { struct ref_update *update = transaction->updates[i]; @@ -2399,6 +2520,17 @@ static void files_transaction_cleanup(struct ref_transaction *transaction) } } + if (backend_data->packed_transaction && + ref_transaction_abort(backend_data->packed_transaction, &err)) { + error("error aborting transaction: %s", err.buf); + strbuf_release(&err); + } + + if (backend_data->packed_refs_locked) + packed_refs_unlock(refs->packed_ref_store); + + free(backend_data); + transaction->state = REF_TRANSACTION_CLOSED; } @@ -2414,25 +2546,34 @@ static int files_transaction_prepare(struct ref_store *ref_store, struct string_list affected_refnames = STRING_LIST_INIT_NODUP; char *head_ref = NULL; int head_type; - struct object_id head_oid; + struct files_transaction_backend_data *backend_data; + struct ref_transaction *packed_transaction = NULL; assert(err); if (!transaction->nr) goto cleanup; + backend_data = xcalloc(1, sizeof(*backend_data)); + transaction->backend_data = backend_data; + /* * Fail if a refname appears more than once in the * transaction. (If we end up splitting up any updates using * split_symref_update() or split_head_update(), those * functions will check that the new updates don't have the - * same refname as any existing ones.) + * same refname as any existing ones.) Also fail if any of the + * updates use REF_IS_PRUNING without REF_NO_DEREF. */ for (i = 0; i < transaction->nr; i++) { struct ref_update *update = transaction->updates[i]; struct string_list_item *item = string_list_append(&affected_refnames, update->refname); + if ((update->flags & REF_IS_PRUNING) && + !(update->flags & REF_NO_DEREF)) + BUG("REF_IS_PRUNING set without REF_NO_DEREF"); + /* * We store a pointer to update in item->util, but at * the moment we never use the value of this field @@ -2466,7 +2607,7 @@ static int files_transaction_prepare(struct ref_store *ref_store, */ head_ref = refs_resolve_refdup(ref_store, "HEAD", RESOLVE_REF_NO_RECURSE, - head_oid.hash, &head_type); + NULL, &head_type); if (head_ref && !(head_type & REF_ISSYMREF)) { FREE_AND_NULL(head_ref); @@ -2486,7 +2627,58 @@ static int files_transaction_prepare(struct ref_store *ref_store, ret = lock_ref_for_update(refs, update, transaction, head_ref, &affected_refnames, err); if (ret) - break; + goto cleanup; + + if (update->flags & REF_DELETING && + !(update->flags & REF_LOG_ONLY) && + !(update->flags & REF_IS_PRUNING)) { + /* + * This reference has to be deleted from + * packed-refs if it exists there. 
+ */ + if (!packed_transaction) { + packed_transaction = ref_store_transaction_begin( + refs->packed_ref_store, err); + if (!packed_transaction) { + ret = TRANSACTION_GENERIC_ERROR; + goto cleanup; + } + + backend_data->packed_transaction = + packed_transaction; + } + + ref_transaction_add_update( + packed_transaction, update->refname, + REF_HAVE_NEW | REF_NO_DEREF, + &update->new_oid, NULL, + NULL); + } + } + + if (packed_transaction) { + if (packed_refs_lock(refs->packed_ref_store, 0, err)) { + ret = TRANSACTION_GENERIC_ERROR; + goto cleanup; + } + backend_data->packed_refs_locked = 1; + + if (is_packed_transaction_needed(refs->packed_ref_store, + packed_transaction)) { + ret = ref_transaction_prepare(packed_transaction, err); + } else { + /* + * We can skip rewriting the `packed-refs` + * file. But we do need to leave it locked, so + * that somebody else doesn't pack a reference + * that we are trying to delete. + */ + if (ref_transaction_abort(packed_transaction, err)) { + ret = TRANSACTION_GENERIC_ERROR; + goto cleanup; + } + backend_data->packed_transaction = NULL; + } } cleanup: @@ -2494,7 +2686,7 @@ cleanup: string_list_clear(&affected_refnames, 0); if (ret) - files_transaction_cleanup(transaction); + files_transaction_cleanup(refs, transaction); else transaction->state = REF_TRANSACTION_PREPARED; @@ -2509,9 +2701,10 @@ static int files_transaction_finish(struct ref_store *ref_store, files_downcast(ref_store, 0, "ref_transaction_finish"); size_t i; int ret = 0; - struct string_list refs_to_delete = STRING_LIST_INIT_NODUP; - struct string_list_item *ref_to_delete; struct strbuf sb = STRBUF_INIT; + struct files_transaction_backend_data *backend_data; + struct ref_transaction *packed_transaction; + assert(err); @@ -2520,6 +2713,9 @@ static int files_transaction_finish(struct ref_store *ref_store, return 0; } + backend_data = transaction->backend_data; + packed_transaction = backend_data->packed_transaction; + /* Perform updates first so live commits remain referenced */ for (i = 0; i < transaction->nr; i++) { struct ref_update *update = transaction->updates[i]; @@ -2555,7 +2751,44 @@ static int files_transaction_finish(struct ref_store *ref_store, } } } - /* Perform deletes now that updates are safely completed */ + + /* + * Now that updates are safely completed, we can perform + * deletes. First delete the reflogs of any references that + * will be deleted, since (in the unexpected event of an + * error) leaving a reference without a reflog is less bad + * than leaving a reflog without a reference (the latter is a + * mildly invalid repository state): + */ + for (i = 0; i < transaction->nr; i++) { + struct ref_update *update = transaction->updates[i]; + if (update->flags & REF_DELETING && + !(update->flags & REF_LOG_ONLY) && + !(update->flags & REF_IS_PRUNING)) { + strbuf_reset(&sb); + files_reflog_path(refs, &sb, update->refname); + if (!unlink_or_warn(sb.buf)) + try_remove_empty_parents(refs, update->refname, + REMOVE_EMPTY_PARENTS_REFLOG); + } + } + + /* + * Perform deletes now that updates are safely completed. 
+ * + * First delete any packed versions of the references, while + * retaining the packed-refs lock: + */ + if (packed_transaction) { + ret = ref_transaction_commit(packed_transaction, err); + ref_transaction_free(packed_transaction); + packed_transaction = NULL; + backend_data->packed_transaction = NULL; + if (ret) + goto cleanup; + } + + /* Now delete the loose versions of the references: */ for (i = 0; i < transaction->nr; i++) { struct ref_update *update = transaction->updates[i]; struct ref_lock *lock = update->backend_data; @@ -2573,39 +2806,13 @@ static int files_transaction_finish(struct ref_store *ref_store, } update->flags |= REF_DELETED_LOOSE; } - - if (!(update->flags & REF_ISPRUNING)) - string_list_append(&refs_to_delete, - lock->ref_name); } } - if (packed_refs_lock(refs->packed_ref_store, 0, err)) { - ret = TRANSACTION_GENERIC_ERROR; - goto cleanup; - } - - if (repack_without_refs(refs->packed_ref_store, &refs_to_delete, err)) { - ret = TRANSACTION_GENERIC_ERROR; - packed_refs_unlock(refs->packed_ref_store); - goto cleanup; - } - - packed_refs_unlock(refs->packed_ref_store); - - /* Delete the reflogs of any references that were deleted: */ - for_each_string_list_item(ref_to_delete, &refs_to_delete) { - strbuf_reset(&sb); - files_reflog_path(refs, &sb, ref_to_delete->string); - if (!unlink_or_warn(sb.buf)) - try_remove_empty_parents(refs, ref_to_delete->string, - REMOVE_EMPTY_PARENTS_REFLOG); - } - clear_loose_ref_cache(refs); cleanup: - files_transaction_cleanup(transaction); + files_transaction_cleanup(refs, transaction); for (i = 0; i < transaction->nr; i++) { struct ref_update *update = transaction->updates[i]; @@ -2623,7 +2830,6 @@ cleanup: } strbuf_release(&sb); - string_list_clear(&refs_to_delete, 0); return ret; } @@ -2631,7 +2837,10 @@ static int files_transaction_abort(struct ref_store *ref_store, struct ref_transaction *transaction, struct strbuf *err) { - files_transaction_cleanup(transaction); + struct files_ref_store *refs = + files_downcast(ref_store, 0, "ref_transaction_abort"); + + files_transaction_cleanup(refs, transaction); return 0; } @@ -2653,6 +2862,7 @@ static int files_initial_transaction_commit(struct ref_store *ref_store, size_t i; int ret = 0; struct string_list affected_refnames = STRING_LIST_INIT_NODUP; + struct ref_transaction *packed_transaction = NULL; assert(err); @@ -2685,6 +2895,12 @@ static int files_initial_transaction_commit(struct ref_store *ref_store, &affected_refnames)) die("BUG: initial ref transaction called with existing refs"); + packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err); + if (!packed_transaction) { + ret = TRANSACTION_GENERIC_ERROR; + goto cleanup; + } + for (i = 0; i < transaction->nr; i++) { struct ref_update *update = transaction->updates[i]; @@ -2697,6 +2913,15 @@ static int files_initial_transaction_commit(struct ref_store *ref_store, ret = TRANSACTION_NAME_CONFLICT; goto cleanup; } + + /* + * Add a reference creation for this reference to the + * packed-refs transaction: + */ + ref_transaction_add_update(packed_transaction, update->refname, + update->flags & ~REF_HAVE_OLD, + &update->new_oid, &update->old_oid, + NULL); } if (packed_refs_lock(refs->packed_ref_store, 0, err)) { @@ -2704,22 +2929,14 @@ static int files_initial_transaction_commit(struct ref_store *ref_store, goto cleanup; } - for (i = 0; i < transaction->nr; i++) { - struct ref_update *update = transaction->updates[i]; - - if ((update->flags & REF_HAVE_NEW) && - !is_null_oid(&update->new_oid)) - 
add_packed_ref(refs->packed_ref_store, update->refname, - &update->new_oid); - } - - if (commit_packed_refs(refs->packed_ref_store, err)) { + if (initial_ref_transaction_commit(packed_transaction, err)) { ret = TRANSACTION_GENERIC_ERROR; - goto cleanup; } -cleanup: packed_refs_unlock(refs->packed_ref_store); +cleanup: + if (packed_transaction) + ref_transaction_free(packed_transaction); transaction->state = REF_TRANSACTION_CLOSED; string_list_clear(&affected_refnames, 0); return ret; @@ -2763,7 +2980,7 @@ static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid, } static int files_reflog_expire(struct ref_store *ref_store, - const char *refname, const unsigned char *sha1, + const char *refname, const struct object_id *oid, unsigned int flags, reflog_expiry_prepare_fn prepare_fn, reflog_expiry_should_prune_fn should_prune_fn, @@ -2780,7 +2997,6 @@ static int files_reflog_expire(struct ref_store *ref_store, int status = 0; int type; struct strbuf err = STRBUF_INIT; - struct object_id oid; memset(&cb, 0, sizeof(cb)); cb.flags = flags; @@ -2792,9 +3008,9 @@ static int files_reflog_expire(struct ref_store *ref_store, * reference itself, plus we might need to update the * reference if --updateref was specified: */ - lock = lock_ref_sha1_basic(refs, refname, sha1, - NULL, NULL, REF_NODEREF, - &type, &err); + lock = lock_ref_oid_basic(refs, refname, oid, + NULL, NULL, REF_NO_DEREF, + &type, &err); if (!lock) { error("cannot lock ref '%s': %s", refname, err.buf); strbuf_release(&err); @@ -2830,9 +3046,7 @@ static int files_reflog_expire(struct ref_store *ref_store, } } - hashcpy(oid.hash, sha1); - - (*prepare_fn)(refname, &oid, cb.policy_cb); + (*prepare_fn)(refname, oid, cb.policy_cb); refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb); (*cleanup_fn)(cb.policy_cb); @@ -2848,16 +3062,17 @@ static int files_reflog_expire(struct ref_store *ref_store, !(type & REF_ISSYMREF) && !is_null_oid(&cb.last_kept_oid); - if (close_lock_file(&reflog_lock)) { + if (close_lock_file_gently(&reflog_lock)) { status |= error("couldn't write %s: %s", log_file, strerror(errno)); + rollback_lock_file(&reflog_lock); } else if (update && - (write_in_full(get_lock_file_fd(lock->lk), - oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ || - write_str_in_full(get_lock_file_fd(lock->lk), "\n") != 1 || - close_ref(lock) < 0)) { + (write_in_full(get_lock_file_fd(&lock->lk), + oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) < 0 || + write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 || + close_ref_gently(lock) < 0)) { status |= error("couldn't write %s", - get_lock_file_path(lock->lk)); + get_lock_file_path(&lock->lk)); rollback_lock_file(&reflog_lock); } else if (commit_lock_file(&reflog_lock)) { status |= error("unable to write reflog '%s' (%s)", @@ -2908,10 +3123,10 @@ struct ref_storage_be refs_be_files = { files_initial_transaction_commit, files_pack_refs, - files_peel_ref, files_create_symref, files_delete_refs, files_rename_ref, + files_copy_ref, files_ref_iterator_begin, files_read_raw_ref, |
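
One change that is easy to miss in the hunks above is the reworked prune_refs(): it now takes a struct ref_to_prune ** and consumes the linked list as it goes, freeing each entry itself, so files_pack_refs() no longer leaks the pruning list. The standalone sketch below is only an illustration of that consume-and-free loop, not the patch's code: the types are simplified (the real struct stores its name in a FLEX_ARRAY member), and prune_one() stands in for prune_ref(), which in the patch deletes the loose ref through a ref transaction.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ref_to_prune {
	struct ref_to_prune *next;
	char name[64];	/* simplified; the real struct uses FLEX_ARRAY */
};

static void prune_one(const struct ref_to_prune *r)
{
	/* Stand-in for prune_ref(), which deletes the loose reference. */
	printf("pruning %s\n", r->name);
}

/*
 * Consume and free the whole list, emptying *refs_to_prune, just as
 * the reworked prune_refs() in the patch does.
 */
static void prune_refs(struct ref_to_prune **refs_to_prune)
{
	while (*refs_to_prune) {
		struct ref_to_prune *r = *refs_to_prune;
		*refs_to_prune = r->next;
		prune_one(r);
		free(r);
	}
}

int main(void)
{
	struct ref_to_prune *list = NULL;
	const char *names[] = { "refs/heads/old", "refs/tags/tmp" };
	size_t i;

	for (i = 0; i < 2; i++) {
		struct ref_to_prune *n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		strncpy(n->name, names[i], sizeof(n->name) - 1);
		n->next = list;
		list = n;
	}
	prune_refs(&list);
	return 0;
}
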