Diffstat (limited to 'refs')
 refs/files-backend.c  | 385
 refs/packed-backend.c | 472
 refs/packed-backend.h |  17
 refs/refs-internal.h  |   1
 4 files changed, 595 insertions(+), 280 deletions(-)
diff --git a/refs/files-backend.c b/refs/files-backend.c
index fccbc24ac4..32663a999e 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -12,7 +12,7 @@
struct ref_lock {
char *ref_name;
- struct lock_file *lk;
+ struct lock_file lk;
struct object_id old_oid;
};
@@ -106,15 +106,6 @@ static void files_reflog_path(struct files_ref_store *refs,
struct strbuf *sb,
const char *refname)
{
- if (!refname) {
- /*
- * FIXME: of course this is wrong in multi worktree
- * setting. To be fixed real soon.
- */
- strbuf_addf(sb, "%s/logs", refs->gitcommondir);
- return;
- }
-
switch (ref_type(refname)) {
case REF_TYPE_PER_WORKTREE:
case REF_TYPE_PSEUDOREF:
@@ -418,9 +409,7 @@ out:
static void unlock_ref(struct ref_lock *lock)
{
- /* Do not free lock->lk -- atexit() still looks at them */
- if (lock->lk)
- rollback_lock_file(lock->lk);
+ rollback_lock_file(&lock->lk);
free(lock->ref_name);
free(lock);
}
@@ -534,11 +523,8 @@ retry:
goto error_return;
}
- if (!lock->lk)
- lock->lk = xcalloc(1, sizeof(struct lock_file));
-
if (hold_lock_file_for_update_timeout(
- lock->lk, ref_file.buf, LOCK_NO_DEREF,
+ &lock->lk, ref_file.buf, LOCK_NO_DEREF,
get_files_ref_lock_timeout_ms()) < 0) {
if (errno == ENOENT && --attempts_remaining > 0) {
/*
@@ -949,11 +935,9 @@ static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs,
goto error_return;
}
- lock->lk = xcalloc(1, sizeof(struct lock_file));
-
lock->ref_name = xstrdup(refname);
- if (raceproof_create_file(ref_file.buf, create_reflock, lock->lk)) {
+ if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
last_errno = errno;
unable_to_lock_message(ref_file.buf, errno, err);
goto error_return;
@@ -1057,11 +1041,17 @@ static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
strbuf_release(&err);
}
-static void prune_refs(struct files_ref_store *refs, struct ref_to_prune *r)
+/*
+ * Prune the loose versions of the references in the linked list
+ * `*refs_to_prune`, freeing the entries in the list as we go.
+ */
+static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune)
{
- while (r) {
+ while (*refs_to_prune) {
+ struct ref_to_prune *r = *refs_to_prune;
+ *refs_to_prune = r->next;
prune_ref(refs, r);
- r = r->next;
+ free(r);
}
}
@@ -1100,6 +1090,11 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
int ok;
struct ref_to_prune *refs_to_prune = NULL;
struct strbuf err = STRBUF_INIT;
+ struct ref_transaction *transaction;
+
+ transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
+ if (!transaction)
+ return -1;
packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
@@ -1115,12 +1110,14 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
continue;
/*
- * Create an entry in the packed-refs cache equivalent
- * to the one from the loose ref cache, except that
- * we don't copy the peeled status, because we want it
- * to be re-peeled.
+ * Add a reference creation for this reference to the
+ * packed-refs transaction:
*/
- add_packed_ref(refs->packed_ref_store, iter->refname, iter->oid);
+ if (ref_transaction_update(transaction, iter->refname,
+ iter->oid->hash, NULL,
+ REF_NODEREF, NULL, &err))
+ die("failure preparing to create packed reference %s: %s",
+ iter->refname, err.buf);
/* Schedule the loose reference for pruning if requested. */
if ((flags & PACK_REFS_PRUNE)) {
@@ -1134,11 +1131,14 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
if (ok != ITER_DONE)
die("error while iterating over references");
- if (commit_packed_refs(refs->packed_ref_store, &err))
- die("unable to overwrite old ref-pack file: %s", err.buf);
+ if (ref_transaction_commit(transaction, &err))
+ die("unable to write new packed-refs: %s", err.buf);
+
+ ref_transaction_free(transaction);
+
packed_refs_unlock(refs->packed_ref_store);
- prune_refs(refs, refs_to_prune);
+ prune_refs(refs, &refs_to_prune);
strbuf_release(&err);
return 0;
}
@@ -1157,7 +1157,7 @@ static int files_delete_refs(struct ref_store *ref_store, const char *msg,
if (packed_refs_lock(refs->packed_ref_store, 0, &err))
goto error;
- if (repack_without_refs(refs->packed_ref_store, refnames, &err)) {
+ if (refs_delete_refs(refs->packed_ref_store, msg, refnames, flags)) {
packed_refs_unlock(refs->packed_ref_store);
goto error;
}
@@ -1402,16 +1402,16 @@ static int files_rename_ref(struct ref_store *ref_store,
return ret;
}
-static int close_ref(struct ref_lock *lock)
+static int close_ref_gently(struct ref_lock *lock)
{
- if (close_lock_file(lock->lk))
+ if (close_lock_file_gently(&lock->lk))
return -1;
return 0;
}
static int commit_ref(struct ref_lock *lock)
{
- char *path = get_locked_file_path(lock->lk);
+ char *path = get_locked_file_path(&lock->lk);
struct stat st;
if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
@@ -1435,7 +1435,7 @@ static int commit_ref(struct ref_lock *lock)
free(path);
}
- if (commit_lock_file(lock->lk))
+ if (commit_lock_file(&lock->lk))
return -1;
return 0;
}
@@ -1627,12 +1627,12 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
unlock_ref(lock);
return -1;
}
- fd = get_lock_file_fd(lock->lk);
+ fd = get_lock_file_fd(&lock->lk);
if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
write_in_full(fd, &term, 1) != 1 ||
- close_ref(lock) < 0) {
+ close_ref_gently(lock) < 0) {
strbuf_addf(err,
- "couldn't write '%s'", get_lock_file_path(lock->lk));
+ "couldn't write '%s'", get_lock_file_path(&lock->lk));
unlock_ref(lock);
return -1;
}
@@ -1709,7 +1709,7 @@ static int create_ref_symlink(struct ref_lock *lock, const char *target)
{
int ret = -1;
#ifndef NO_SYMLINK_HEAD
- char *ref_path = get_locked_file_path(lock->lk);
+ char *ref_path = get_locked_file_path(&lock->lk);
unlink(ref_path);
ret = symlink(target, ref_path);
free(ref_path);
@@ -1745,14 +1745,14 @@ static int create_symref_locked(struct files_ref_store *refs,
return 0;
}
- if (!fdopen_lock_file(lock->lk, "w"))
+ if (!fdopen_lock_file(&lock->lk, "w"))
return error("unable to fdopen %s: %s",
- lock->lk->tempfile.filename.buf, strerror(errno));
+ lock->lk.tempfile->filename.buf, strerror(errno));
update_symref_reflog(refs, lock, refname, target, logmsg);
/* no error check; commit_ref will check ferror */
- fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
+ fprintf(lock->lk.tempfile->fp, "ref: %s\n", target);
if (commit_ref(lock) < 0)
return error("unable to write symref for %s: %s", refname,
strerror(errno));
@@ -2059,23 +2059,63 @@ static struct ref_iterator_vtable files_reflog_iterator_vtable = {
files_reflog_iterator_abort
};
-static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
+static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
+ const char *gitdir)
{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_READ,
- "reflog_iterator_begin");
struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
struct ref_iterator *ref_iterator = &iter->base;
struct strbuf sb = STRBUF_INIT;
base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
- files_reflog_path(refs, &sb, NULL);
+ strbuf_addf(&sb, "%s/logs", gitdir);
iter->dir_iterator = dir_iterator_begin(sb.buf);
iter->ref_store = ref_store;
strbuf_release(&sb);
+
return ref_iterator;
}
+static enum iterator_selection reflog_iterator_select(
+ struct ref_iterator *iter_worktree,
+ struct ref_iterator *iter_common,
+ void *cb_data)
+{
+ if (iter_worktree) {
+ /*
+ * We're a bit loose here. We probably should ignore
+ * common refs if they are accidentally added as
+ * per-worktree refs.
+ */
+ return ITER_SELECT_0;
+ } else if (iter_common) {
+ if (ref_type(iter_common->refname) == REF_TYPE_NORMAL)
+ return ITER_SELECT_1;
+
+ /*
+ * The main ref store may contain main worktree's
+ * per-worktree refs, which should be ignored
+ */
+ return ITER_SKIP_1;
+ } else
+ return ITER_DONE;
+}
+
+static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
+{
+ struct files_ref_store *refs =
+ files_downcast(ref_store, REF_STORE_READ,
+ "reflog_iterator_begin");
+
+ if (!strcmp(refs->gitdir, refs->gitcommondir)) {
+ return reflog_iterator_begin(ref_store, refs->gitcommondir);
+ } else {
+ return merge_ref_iterator_begin(
+ reflog_iterator_begin(ref_store, refs->gitdir),
+ reflog_iterator_begin(ref_store, refs->gitcommondir),
+ reflog_iterator_select, refs);
+ }
+}
+
/*
* If update is a direct update of head_ref (the reference pointed to
* by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
@@ -2099,11 +2139,10 @@ static int split_head_update(struct ref_update *update,
/*
* First make sure that HEAD is not already in the
- * transaction. This insertion is O(N) in the transaction
+ * transaction. This check is O(lg N) in the transaction
* size, but it happens at most once per transaction.
*/
- item = string_list_insert(affected_refnames, "HEAD");
- if (item->util) {
+ if (string_list_has_string(affected_refnames, "HEAD")) {
/* An entry already existed */
strbuf_addf(err,
"multiple updates for 'HEAD' (including one "
@@ -2118,6 +2157,14 @@ static int split_head_update(struct ref_update *update,
update->new_oid.hash, update->old_oid.hash,
update->msg);
+ /*
+ * Add "HEAD". This insertion is O(N) in the transaction
+ * size, but it happens at most once per transaction.
+ * Add new_update->refname instead of a literal "HEAD".
+ */
+ if (strcmp(new_update->refname, "HEAD"))
+ BUG("%s unexpectedly not 'HEAD'", new_update->refname);
+ item = string_list_insert(affected_refnames, new_update->refname);
item->util = new_update;
return 0;
@@ -2144,13 +2191,12 @@ static int split_symref_update(struct files_ref_store *refs,
/*
* First make sure that referent is not already in the
- * transaction. This insertion is O(N) in the transaction
+ * transaction. This check is O(lg N) in the transaction
* size, but it happens at most once per symref in a
* transaction.
*/
- item = string_list_insert(affected_refnames, referent);
- if (item->util) {
- /* An entry already existed */
+ if (string_list_has_string(affected_refnames, referent)) {
+ /* An entry already exists */
strbuf_addf(err,
"multiple updates for '%s' (including one "
"via symref '%s') are not allowed",
@@ -2185,6 +2231,17 @@ static int split_symref_update(struct files_ref_store *refs,
update->flags |= REF_LOG_ONLY | REF_NODEREF;
update->flags &= ~REF_HAVE_OLD;
+ /*
+ * Add the referent. This insertion is O(N) in the transaction
+ * size, but it happens at most once per symref in a
+ * transaction. Make sure to add new_update->refname, which will
+ * be valid as long as affected_refnames is in use, and NOT
+ * referent, which might soon be freed by our caller.
+ */
+ item = string_list_insert(affected_refnames, new_update->refname);
+ if (item->util)
+ BUG("%s unexpectedly found in affected_refnames",
+ new_update->refname);
item->util = new_update;
return 0;
@@ -2256,7 +2313,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
struct strbuf referent = STRBUF_INIT;
int mustexist = (update->flags & REF_HAVE_OLD) &&
!is_null_oid(&update->old_oid);
- int ret;
+ int ret = 0;
struct ref_lock *lock;
files_assert_main_repository(refs, "lock_ref_for_update");
@@ -2268,7 +2325,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
ret = split_head_update(update, transaction, head_ref,
affected_refnames, err);
if (ret)
- return ret;
+ goto out;
}
ret = lock_raw_ref(refs, update->refname, mustexist,
@@ -2282,7 +2339,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
strbuf_addf(err, "cannot lock ref '%s': %s",
original_update_refname(update), reason);
free(reason);
- return ret;
+ goto out;
}
update->backend_data = lock;
@@ -2301,10 +2358,12 @@ static int lock_ref_for_update(struct files_ref_store *refs,
strbuf_addf(err, "cannot lock ref '%s': "
"error reading reference",
original_update_refname(update));
- return -1;
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
}
} else if (check_old_oid(update, &lock->old_oid, err)) {
- return TRANSACTION_GENERIC_ERROR;
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
}
} else {
/*
@@ -2318,13 +2377,15 @@ static int lock_ref_for_update(struct files_ref_store *refs,
referent.buf, transaction,
affected_refnames, err);
if (ret)
- return ret;
+ goto out;
}
} else {
struct ref_update *parent_update;
- if (check_old_oid(update, &lock->old_oid, err))
- return TRANSACTION_GENERIC_ERROR;
+ if (check_old_oid(update, &lock->old_oid, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ }
/*
* If this update is happening indirectly because of a
@@ -2361,7 +2422,8 @@ static int lock_ref_for_update(struct files_ref_store *refs,
"cannot update ref '%s': %s",
update->refname, write_err);
free(write_err);
- return TRANSACTION_GENERIC_ERROR;
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
} else {
update->flags |= REF_NEEDS_COMMIT;
}
@@ -2372,22 +2434,35 @@ static int lock_ref_for_update(struct files_ref_store *refs,
* the lockfile is still open. Close it to
* free up the file descriptor:
*/
- if (close_ref(lock)) {
+ if (close_ref_gently(lock)) {
strbuf_addf(err, "couldn't close '%s.lock'",
update->refname);
- return TRANSACTION_GENERIC_ERROR;
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
}
}
- return 0;
+
+out:
+ strbuf_release(&referent);
+ return ret;
}
+struct files_transaction_backend_data {
+ struct ref_transaction *packed_transaction;
+ int packed_refs_locked;
+};
+
/*
* Unlock any references in `transaction` that are still locked, and
* mark the transaction closed.
*/
-static void files_transaction_cleanup(struct ref_transaction *transaction)
+static void files_transaction_cleanup(struct files_ref_store *refs,
+ struct ref_transaction *transaction)
{
size_t i;
+ struct files_transaction_backend_data *backend_data =
+ transaction->backend_data;
+ struct strbuf err = STRBUF_INIT;
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
@@ -2399,6 +2474,17 @@ static void files_transaction_cleanup(struct ref_transaction *transaction)
}
}
+ if (backend_data->packed_transaction &&
+ ref_transaction_abort(backend_data->packed_transaction, &err)) {
+ error("error aborting transaction: %s", err.buf);
+ strbuf_release(&err);
+ }
+
+ if (backend_data->packed_refs_locked)
+ packed_refs_unlock(refs->packed_ref_store);
+
+ free(backend_data);
+
transaction->state = REF_TRANSACTION_CLOSED;
}
@@ -2415,12 +2501,17 @@ static int files_transaction_prepare(struct ref_store *ref_store,
char *head_ref = NULL;
int head_type;
struct object_id head_oid;
+ struct files_transaction_backend_data *backend_data;
+ struct ref_transaction *packed_transaction = NULL;
assert(err);
if (!transaction->nr)
goto cleanup;
+ backend_data = xcalloc(1, sizeof(*backend_data));
+ transaction->backend_data = backend_data;
+
/*
* Fail if a refname appears more than once in the
* transaction. (If we end up splitting up any updates using
@@ -2487,6 +2578,41 @@ static int files_transaction_prepare(struct ref_store *ref_store,
head_ref, &affected_refnames, err);
if (ret)
break;
+
+ if (update->flags & REF_DELETING &&
+ !(update->flags & REF_LOG_ONLY) &&
+ !(update->flags & REF_ISPRUNING)) {
+ /*
+ * This reference has to be deleted from
+ * packed-refs if it exists there.
+ */
+ if (!packed_transaction) {
+ packed_transaction = ref_store_transaction_begin(
+ refs->packed_ref_store, err);
+ if (!packed_transaction) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
+ backend_data->packed_transaction =
+ packed_transaction;
+ }
+
+ ref_transaction_add_update(
+ packed_transaction, update->refname,
+ update->flags & ~REF_HAVE_OLD,
+ update->new_oid.hash, update->old_oid.hash,
+ NULL);
+ }
+ }
+
+ if (packed_transaction) {
+ if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+ backend_data->packed_refs_locked = 1;
+ ret = ref_transaction_prepare(packed_transaction, err);
}
cleanup:
@@ -2494,7 +2620,7 @@ cleanup:
string_list_clear(&affected_refnames, 0);
if (ret)
- files_transaction_cleanup(transaction);
+ files_transaction_cleanup(refs, transaction);
else
transaction->state = REF_TRANSACTION_PREPARED;
@@ -2509,9 +2635,10 @@ static int files_transaction_finish(struct ref_store *ref_store,
files_downcast(ref_store, 0, "ref_transaction_finish");
size_t i;
int ret = 0;
- struct string_list refs_to_delete = STRING_LIST_INIT_NODUP;
- struct string_list_item *ref_to_delete;
struct strbuf sb = STRBUF_INIT;
+ struct files_transaction_backend_data *backend_data;
+ struct ref_transaction *packed_transaction;
+
assert(err);
@@ -2520,6 +2647,9 @@ static int files_transaction_finish(struct ref_store *ref_store,
return 0;
}
+ backend_data = transaction->backend_data;
+ packed_transaction = backend_data->packed_transaction;
+
/* Perform updates first so live commits remain referenced */
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
@@ -2555,7 +2685,44 @@ static int files_transaction_finish(struct ref_store *ref_store,
}
}
}
- /* Perform deletes now that updates are safely completed */
+
+ /*
+ * Now that updates are safely completed, we can perform
+ * deletes. First delete the reflogs of any references that
+ * will be deleted, since (in the unexpected event of an
+ * error) leaving a reference without a reflog is less bad
+ * than leaving a reflog without a reference (the latter is a
+ * mildly invalid repository state):
+ */
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+ if (update->flags & REF_DELETING &&
+ !(update->flags & REF_LOG_ONLY) &&
+ !(update->flags & REF_ISPRUNING)) {
+ strbuf_reset(&sb);
+ files_reflog_path(refs, &sb, update->refname);
+ if (!unlink_or_warn(sb.buf))
+ try_remove_empty_parents(refs, update->refname,
+ REMOVE_EMPTY_PARENTS_REFLOG);
+ }
+ }
+
+ /*
+ * Perform deletes now that updates are safely completed.
+ *
+ * First delete any packed versions of the references, while
+ * retaining the packed-refs lock:
+ */
+ if (packed_transaction) {
+ ret = ref_transaction_commit(packed_transaction, err);
+ ref_transaction_free(packed_transaction);
+ packed_transaction = NULL;
+ backend_data->packed_transaction = NULL;
+ if (ret)
+ goto cleanup;
+ }
+
+ /* Now delete the loose versions of the references: */
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
struct ref_lock *lock = update->backend_data;
@@ -2573,39 +2740,13 @@ static int files_transaction_finish(struct ref_store *ref_store,
}
update->flags |= REF_DELETED_LOOSE;
}
-
- if (!(update->flags & REF_ISPRUNING))
- string_list_append(&refs_to_delete,
- lock->ref_name);
}
}
- if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
- }
-
- if (repack_without_refs(refs->packed_ref_store, &refs_to_delete, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- packed_refs_unlock(refs->packed_ref_store);
- goto cleanup;
- }
-
- packed_refs_unlock(refs->packed_ref_store);
-
- /* Delete the reflogs of any references that were deleted: */
- for_each_string_list_item(ref_to_delete, &refs_to_delete) {
- strbuf_reset(&sb);
- files_reflog_path(refs, &sb, ref_to_delete->string);
- if (!unlink_or_warn(sb.buf))
- try_remove_empty_parents(refs, ref_to_delete->string,
- REMOVE_EMPTY_PARENTS_REFLOG);
- }
-
clear_loose_ref_cache(refs);
cleanup:
- files_transaction_cleanup(transaction);
+ files_transaction_cleanup(refs, transaction);
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
@@ -2623,7 +2764,6 @@ cleanup:
}
strbuf_release(&sb);
- string_list_clear(&refs_to_delete, 0);
return ret;
}
@@ -2631,7 +2771,10 @@ static int files_transaction_abort(struct ref_store *ref_store,
struct ref_transaction *transaction,
struct strbuf *err)
{
- files_transaction_cleanup(transaction);
+ struct files_ref_store *refs =
+ files_downcast(ref_store, 0, "ref_transaction_abort");
+
+ files_transaction_cleanup(refs, transaction);
return 0;
}
@@ -2653,6 +2796,7 @@ static int files_initial_transaction_commit(struct ref_store *ref_store,
size_t i;
int ret = 0;
struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
+ struct ref_transaction *packed_transaction = NULL;
assert(err);
@@ -2685,6 +2829,12 @@ static int files_initial_transaction_commit(struct ref_store *ref_store,
&affected_refnames))
die("BUG: initial ref transaction called with existing refs");
+ packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
+ if (!packed_transaction) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
@@ -2697,6 +2847,15 @@ static int files_initial_transaction_commit(struct ref_store *ref_store,
ret = TRANSACTION_NAME_CONFLICT;
goto cleanup;
}
+
+ /*
+ * Add a reference creation for this reference to the
+ * packed-refs transaction:
+ */
+ ref_transaction_add_update(packed_transaction, update->refname,
+ update->flags & ~REF_HAVE_OLD,
+ update->new_oid.hash, update->old_oid.hash,
+ NULL);
}
if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
@@ -2704,21 +2863,14 @@ static int files_initial_transaction_commit(struct ref_store *ref_store,
goto cleanup;
}
- for (i = 0; i < transaction->nr; i++) {
- struct ref_update *update = transaction->updates[i];
-
- if ((update->flags & REF_HAVE_NEW) &&
- !is_null_oid(&update->new_oid))
- add_packed_ref(refs->packed_ref_store, update->refname,
- &update->new_oid);
- }
-
- if (commit_packed_refs(refs->packed_ref_store, err)) {
+ if (initial_ref_transaction_commit(packed_transaction, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
}
cleanup:
+ if (packed_transaction)
+ ref_transaction_free(packed_transaction);
packed_refs_unlock(refs->packed_ref_store);
transaction->state = REF_TRANSACTION_CLOSED;
string_list_clear(&affected_refnames, 0);
@@ -2848,16 +3000,17 @@ static int files_reflog_expire(struct ref_store *ref_store,
!(type & REF_ISSYMREF) &&
!is_null_oid(&cb.last_kept_oid);
- if (close_lock_file(&reflog_lock)) {
+ if (close_lock_file_gently(&reflog_lock)) {
status |= error("couldn't write %s: %s", log_file,
strerror(errno));
+ rollback_lock_file(&reflog_lock);
} else if (update &&
- (write_in_full(get_lock_file_fd(lock->lk),
+ (write_in_full(get_lock_file_fd(&lock->lk),
oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_str_in_full(get_lock_file_fd(lock->lk), "\n") != 1 ||
- close_ref(lock) < 0)) {
+ write_str_in_full(get_lock_file_fd(&lock->lk), "\n") != 1 ||
+ close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
- get_lock_file_path(lock->lk));
+ get_lock_file_path(&lock->lk));
rollback_lock_file(&reflog_lock);
} else if (commit_lock_file(&reflog_lock)) {
status |= error("unable to write reflog '%s' (%s)",
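[Sketch, not part of the patch: the packed-refs write path that the files_pack_refs() and files_initial_transaction_commit() hunks above converge on. The helper name and the refnames/oids parameters are illustrative stand-ins for the loose-ref iteration in the real code; all API calls and the hash-based ref_transaction_update() signature are taken from the hunks above.]

static int pack_refs_via_transaction(struct ref_store *packed_ref_store,
				     const char **refnames,
				     const struct object_id *oids,
				     size_t nr)
{
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	size_t i;

	/* One transaction collects every reference to be packed. */
	transaction = ref_store_transaction_begin(packed_ref_store, &err);
	if (!transaction)
		return -1;

	/* Hold the packed-refs lock across queueing and committing. */
	packed_refs_lock(packed_ref_store, LOCK_DIE_ON_ERROR, &err);

	for (i = 0; i < nr; i++)
		if (ref_transaction_update(transaction, refnames[i],
					   oids[i].hash, NULL,
					   REF_NODEREF, NULL, &err))
			die("failure preparing to create packed reference %s: %s",
			    refnames[i], err.buf);

	/* The new packed-refs file is written and renamed at commit time. */
	if (ref_transaction_commit(transaction, &err))
		die("unable to write new packed-refs: %s", err.buf);

	ref_transaction_free(transaction);
	packed_refs_unlock(packed_ref_store);
	strbuf_release(&err);
	return 0;
}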
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index 412c85034f..3bc47ffd5e 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -75,7 +75,7 @@ struct packed_ref_store {
* "packed-refs" file. Note that this (and thus the enclosing
* `packed_ref_store`) must not be freed.
*/
- struct tempfile tempfile;
+ struct tempfile *tempfile;
};
struct ref_store *packed_ref_store_create(const char *path,
@@ -92,19 +92,6 @@ struct ref_store *packed_ref_store_create(const char *path,
}
/*
- * Die if refs is not the main ref store. caller is used in any
- * necessary error messages.
- */
-static void packed_assert_main_repository(struct packed_ref_store *refs,
- const char *caller)
-{
- if (refs->store_flags & REF_STORE_MAIN)
- return;
-
- die("BUG: operation %s only allowed for main ref store", caller);
-}
-
-/*
* Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
* not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
* support at least the flags specified in `required_flags`. `caller`
@@ -322,40 +309,6 @@ static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
}
/*
- * Add or overwrite a reference in the in-memory packed reference
- * cache. This may only be called while the packed-refs file is locked
- * (see packed_refs_lock()). To actually write the packed-refs file,
- * call commit_packed_refs().
- */
-void add_packed_ref(struct ref_store *ref_store,
- const char *refname, const struct object_id *oid)
-{
- struct packed_ref_store *refs =
- packed_downcast(ref_store, REF_STORE_WRITE,
- "add_packed_ref");
- struct ref_dir *packed_refs;
- struct ref_entry *packed_entry;
-
- if (!is_lock_file_locked(&refs->lock))
- die("BUG: packed refs not locked");
-
- if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
- die("Reference has invalid format: '%s'", refname);
-
- packed_refs = get_packed_refs(refs);
- packed_entry = find_ref_entry(packed_refs, refname);
- if (packed_entry) {
- /* Overwrite the existing entry: */
- oidcpy(&packed_entry->u.value.oid, oid);
- packed_entry->flag = REF_ISPACKED;
- oidclr(&packed_entry->u.value.peeled);
- } else {
- packed_entry = create_ref_entry(refname, oid, REF_ISPACKED);
- add_ref_entry(packed_refs, packed_entry);
- }
-}
-
-/*
* Return the ref_entry for the given refname from the packed
* references. If it does not exist, return NULL.
*/
@@ -525,7 +478,6 @@ int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
"packed_refs_lock");
static int timeout_configured = 0;
static int timeout_value = 1000;
- struct packed_ref_cache *packed_ref_cache;
if (!timeout_configured) {
git_config_get_int("core.packedrefstimeout", &timeout_value);
@@ -545,8 +497,9 @@ int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
return -1;
}
- if (close_lock_file(&refs->lock)) {
+ if (close_lock_file_gently(&refs->lock)) {
strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
+ rollback_lock_file(&refs->lock);
return -1;
}
@@ -560,9 +513,11 @@ int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
*/
validate_packed_ref_cache(refs);
- packed_ref_cache = get_packed_ref_cache(refs);
- /* Increment the reference count to prevent it from being freed: */
- acquire_packed_ref_cache(packed_ref_cache);
+ /*
+ * Now make sure that the packed-refs file as it exists in the
+ * locked state is loaded into the cache:
+ */
+ get_packed_ref_cache(refs);
return 0;
}
@@ -576,7 +531,6 @@ void packed_refs_unlock(struct ref_store *ref_store)
if (!is_lock_file_locked(&refs->lock))
die("BUG: packed_refs_unlock() called when not locked");
rollback_lock_file(&refs->lock);
- release_packed_ref_cache(refs->cache);
}
int packed_refs_is_locked(struct ref_store *ref_store)
@@ -596,29 +550,35 @@ int packed_refs_is_locked(struct ref_store *ref_store)
static const char PACKED_REFS_HEADER[] =
"# pack-refs with: peeled fully-peeled \n";
+static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
+{
+ /* Nothing to do. */
+ return 0;
+}
+
/*
- * Write the current version of the packed refs cache from memory to
- * disk. The packed-refs file must already be locked for writing (see
- * packed_refs_lock()). Return zero on success. On errors, rollback
- * the lockfile, write an error message to `err`, and return a nonzero
- * value.
+ * Write the packed-refs from the cache to the packed-refs tempfile,
+ * incorporating any changes from `updates`. `updates` must be a
+ * sorted string list whose keys are the refnames and whose util
+ * values are `struct ref_update *`. On error, rollback the tempfile,
+ * write an error message to `err`, and return a nonzero value.
+ *
+ * The packfile must be locked before calling this function and will
+ * remain locked when it is done.
*/
-int commit_packed_refs(struct ref_store *ref_store, struct strbuf *err)
+static int write_with_updates(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
{
- struct packed_ref_store *refs =
- packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
- "commit_packed_refs");
- struct packed_ref_cache *packed_ref_cache =
- get_packed_ref_cache(refs);
+ struct ref_iterator *iter = NULL;
+ size_t i;
int ok;
- int ret = -1;
- struct strbuf sb = STRBUF_INIT;
FILE *out;
- struct ref_iterator *iter;
+ struct strbuf sb = STRBUF_INIT;
char *packed_refs_path;
if (!is_lock_file_locked(&refs->lock))
- die("BUG: commit_packed_refs() called when unlocked");
+ die("BUG: write_with_updates() called while unlocked");
/*
* If packed-refs is a symlink, we want to overwrite the
@@ -627,146 +587,303 @@ int commit_packed_refs(struct ref_store *ref_store, struct strbuf *err)
*/
packed_refs_path = get_locked_file_path(&refs->lock);
strbuf_addf(&sb, "%s.new", packed_refs_path);
- if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
+ free(packed_refs_path);
+ refs->tempfile = create_tempfile(sb.buf);
+ if (!refs->tempfile) {
strbuf_addf(err, "unable to create file %s: %s",
sb.buf, strerror(errno));
strbuf_release(&sb);
- goto out;
+ return -1;
}
strbuf_release(&sb);
- out = fdopen_tempfile(&refs->tempfile, "w");
+ out = fdopen_tempfile(refs->tempfile, "w");
if (!out) {
strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
strerror(errno));
goto error;
}
- if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0) {
- strbuf_addf(err, "error writing to %s: %s",
- get_tempfile_path(&refs->tempfile), strerror(errno));
- goto error;
- }
+ if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
+ goto write_error;
+
+ /*
+ * We iterate in parallel through the current list of refs and
+ * the list of updates, processing an entry from at least one
+ * of the lists each time through the loop. When the current
+ * list of refs is exhausted, set iter to NULL. When the list
+ * of updates is exhausted, leave i set to updates->nr.
+ */
+ iter = packed_ref_iterator_begin(&refs->base, "",
+ DO_FOR_EACH_INCLUDE_BROKEN);
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ iter = NULL;
+
+ i = 0;
- iter = cache_ref_iterator_begin(packed_ref_cache->cache, NULL, 0);
- while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
- struct object_id peeled;
- int peel_error = ref_iterator_peel(iter, &peeled);
-
- if (write_packed_entry(out, iter->refname, iter->oid->hash,
- peel_error ? NULL : peeled.hash)) {
- strbuf_addf(err, "error writing to %s: %s",
- get_tempfile_path(&refs->tempfile),
- strerror(errno));
- ref_iterator_abort(iter);
- goto error;
+ while (iter || i < updates->nr) {
+ struct ref_update *update = NULL;
+ int cmp;
+
+ if (i >= updates->nr) {
+ cmp = -1;
+ } else {
+ update = updates->items[i].util;
+
+ if (!iter)
+ cmp = +1;
+ else
+ cmp = strcmp(iter->refname, update->refname);
+ }
+
+ if (!cmp) {
+ /*
+ * There is both an old value and an update
+ * for this reference. Check the old value if
+ * necessary:
+ */
+ if ((update->flags & REF_HAVE_OLD)) {
+ if (is_null_oid(&update->old_oid)) {
+ strbuf_addf(err, "cannot update ref '%s': "
+ "reference already exists",
+ update->refname);
+ goto error;
+ } else if (oidcmp(&update->old_oid, iter->oid)) {
+ strbuf_addf(err, "cannot update ref '%s': "
+ "is at %s but expected %s",
+ update->refname,
+ oid_to_hex(iter->oid),
+ oid_to_hex(&update->old_oid));
+ goto error;
+ }
+ }
+
+ /* Now figure out what to use for the new value: */
+ if ((update->flags & REF_HAVE_NEW)) {
+ /*
+ * The update takes precedence. Skip
+ * the iterator over the unneeded
+ * value.
+ */
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ iter = NULL;
+ cmp = +1;
+ } else {
+ /*
+ * The update doesn't actually want to
+ * change anything. We're done with it.
+ */
+ i++;
+ cmp = -1;
+ }
+ } else if (cmp > 0) {
+ /*
+ * There is no old value but there is an
+ * update for this reference. Make sure that
+ * the update didn't expect an existing value:
+ */
+ if ((update->flags & REF_HAVE_OLD) &&
+ !is_null_oid(&update->old_oid)) {
+ strbuf_addf(err, "cannot update ref '%s': "
+ "reference is missing but expected %s",
+ update->refname,
+ oid_to_hex(&update->old_oid));
+ goto error;
+ }
+ }
+
+ if (cmp < 0) {
+ /* Pass the old reference through. */
+
+ struct object_id peeled;
+ int peel_error = ref_iterator_peel(iter, &peeled);
+
+ if (write_packed_entry(out, iter->refname,
+ iter->oid->hash,
+ peel_error ? NULL : peeled.hash))
+ goto write_error;
+
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ iter = NULL;
+ } else if (is_null_oid(&update->new_oid)) {
+ /*
+ * The update wants to delete the reference,
+ * and the reference either didn't exist or we
+ * have already skipped it. So we're done with
+ * the update (and don't have to write
+ * anything).
+ */
+ i++;
+ } else {
+ struct object_id peeled;
+ int peel_error = peel_object(update->new_oid.hash,
+ peeled.hash);
+
+ if (write_packed_entry(out, update->refname,
+ update->new_oid.hash,
+ peel_error ? NULL : peeled.hash))
+ goto write_error;
+
+ i++;
}
}
if (ok != ITER_DONE) {
- strbuf_addf(err, "unable to rewrite packed-refs file: "
+ strbuf_addf(err, "unable to write packed-refs file: "
"error iterating over old contents");
goto error;
}
- if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
- strbuf_addf(err, "error replacing %s: %s",
- refs->path, strerror(errno));
- goto out;
+ if (close_tempfile_gently(refs->tempfile)) {
+ strbuf_addf(err, "error closing file %s: %s",
+ get_tempfile_path(refs->tempfile),
+ strerror(errno));
+ strbuf_release(&sb);
+ delete_tempfile(&refs->tempfile);
+ return -1;
}
- ret = 0;
- goto out;
+ return 0;
+
+write_error:
+ strbuf_addf(err, "error writing to %s: %s",
+ get_tempfile_path(refs->tempfile), strerror(errno));
error:
- delete_tempfile(&refs->tempfile);
+ if (iter)
+ ref_iterator_abort(iter);
-out:
- free(packed_refs_path);
- return ret;
+ delete_tempfile(&refs->tempfile);
+ return -1;
}
-/*
- * Rewrite the packed-refs file, omitting any refs listed in
- * 'refnames'. On error, leave packed-refs unchanged, write an error
- * message to 'err', and return a nonzero value. The packed refs lock
- * must be held when calling this function; it will still be held when
- * the function returns.
- *
- * The refs in 'refnames' needn't be sorted. `err` must not be NULL.
- */
-int repack_without_refs(struct ref_store *ref_store,
- struct string_list *refnames, struct strbuf *err)
+struct packed_transaction_backend_data {
+ /* True iff the transaction owns the packed-refs lock. */
+ int own_lock;
+
+ struct string_list updates;
+};
+
+static void packed_transaction_cleanup(struct packed_ref_store *refs,
+ struct ref_transaction *transaction)
{
- struct packed_ref_store *refs =
- packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
- "repack_without_refs");
- struct ref_dir *packed;
- struct string_list_item *refname;
- int needs_repacking = 0, removed = 0;
+ struct packed_transaction_backend_data *data = transaction->backend_data;
- packed_assert_main_repository(refs, "repack_without_refs");
- assert(err);
+ if (data) {
+ string_list_clear(&data->updates, 0);
- if (!is_lock_file_locked(&refs->lock))
- die("BUG: repack_without_refs called without holding lock");
+ if (is_tempfile_active(refs->tempfile))
+ delete_tempfile(&refs->tempfile);
- /* Look for a packed ref */
- for_each_string_list_item(refname, refnames) {
- if (get_packed_ref(refs, refname->string)) {
- needs_repacking = 1;
- break;
+ if (data->own_lock && is_lock_file_locked(&refs->lock)) {
+ packed_refs_unlock(&refs->base);
+ data->own_lock = 0;
}
- }
- /* Avoid locking if we have nothing to do */
- if (!needs_repacking)
- return 0; /* no refname exists in packed refs */
-
- packed = get_packed_refs(refs);
-
- /* Remove refnames from the cache */
- for_each_string_list_item(refname, refnames)
- if (remove_entry_from_dir(packed, refname->string) != -1)
- removed = 1;
- if (!removed) {
- /*
- * All packed entries disappeared while we were
- * acquiring the lock.
- */
- clear_packed_ref_cache(refs);
- return 0;
+ free(data);
+ transaction->backend_data = NULL;
}
- /* Write what remains */
- return commit_packed_refs(&refs->base, err);
-}
-
-static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
-{
- /* Nothing to do. */
- return 0;
+ transaction->state = REF_TRANSACTION_CLOSED;
}
static int packed_transaction_prepare(struct ref_store *ref_store,
struct ref_transaction *transaction,
struct strbuf *err)
{
- die("BUG: not implemented yet");
+ struct packed_ref_store *refs = packed_downcast(
+ ref_store,
+ REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
+ "ref_transaction_prepare");
+ struct packed_transaction_backend_data *data;
+ size_t i;
+ int ret = TRANSACTION_GENERIC_ERROR;
+
+ /*
+ * Note that we *don't* skip transactions with zero updates,
+ * because such a transaction might be executed for the side
+ * effect of ensuring that all of the references are peeled.
+ * If the caller wants to optimize away empty transactions, it
+ * should do so itself.
+ */
+
+ data = xcalloc(1, sizeof(*data));
+ string_list_init(&data->updates, 0);
+
+ transaction->backend_data = data;
+
+ /*
+ * Stick the updates in a string list by refname so that we
+ * can sort them:
+ */
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+ struct string_list_item *item =
+ string_list_append(&data->updates, update->refname);
+
+ /* Store a pointer to update in item->util: */
+ item->util = update;
+ }
+ string_list_sort(&data->updates);
+
+ if (ref_update_reject_duplicates(&data->updates, err))
+ goto failure;
+
+ if (!is_lock_file_locked(&refs->lock)) {
+ if (packed_refs_lock(ref_store, 0, err))
+ goto failure;
+ data->own_lock = 1;
+ }
+
+ if (write_with_updates(refs, &data->updates, err))
+ goto failure;
+
+ transaction->state = REF_TRANSACTION_PREPARED;
+ return 0;
+
+failure:
+ packed_transaction_cleanup(refs, transaction);
+ return ret;
}
static int packed_transaction_abort(struct ref_store *ref_store,
struct ref_transaction *transaction,
struct strbuf *err)
{
- die("BUG: not implemented yet");
+ struct packed_ref_store *refs = packed_downcast(
+ ref_store,
+ REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
+ "ref_transaction_abort");
+
+ packed_transaction_cleanup(refs, transaction);
+ return 0;
}
static int packed_transaction_finish(struct ref_store *ref_store,
struct ref_transaction *transaction,
struct strbuf *err)
{
- die("BUG: not implemented yet");
+ struct packed_ref_store *refs = packed_downcast(
+ ref_store,
+ REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
+ "ref_transaction_finish");
+ int ret = TRANSACTION_GENERIC_ERROR;
+ char *packed_refs_path;
+
+ packed_refs_path = get_locked_file_path(&refs->lock);
+ if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
+ strbuf_addf(err, "error replacing %s: %s",
+ refs->path, strerror(errno));
+ goto cleanup;
+ }
+
+ clear_packed_ref_cache(refs);
+ ret = 0;
+
+cleanup:
+ free(packed_refs_path);
+ packed_transaction_cleanup(refs, transaction);
+ return ret;
}
static int packed_initial_transaction_commit(struct ref_store *ref_store,
@@ -779,7 +896,50 @@ static int packed_initial_transaction_commit(struct ref_store *ref_store,
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
struct string_list *refnames, unsigned int flags)
{
- die("BUG: not implemented yet");
+ struct packed_ref_store *refs =
+ packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
+ struct strbuf err = STRBUF_INIT;
+ struct ref_transaction *transaction;
+ struct string_list_item *item;
+ int ret;
+
+ (void)refs; /* We need the check above, but don't use the variable */
+
+ if (!refnames->nr)
+ return 0;
+
+ /*
+ * Since we don't check the references' old_oids, the
+ * individual updates can't fail, so we can pack all of the
+ * updates into a single transaction.
+ */
+
+ transaction = ref_store_transaction_begin(ref_store, &err);
+ if (!transaction)
+ return -1;
+
+ for_each_string_list_item(item, refnames) {
+ if (ref_transaction_delete(transaction, item->string, NULL,
+ flags, msg, &err)) {
+ warning(_("could not delete reference %s: %s"),
+ item->string, err.buf);
+ strbuf_reset(&err);
+ }
+ }
+
+ ret = ref_transaction_commit(transaction, &err);
+
+ if (ret) {
+ if (refnames->nr == 1)
+ error(_("could not delete reference %s: %s"),
+ refnames->items[0].string, err.buf);
+ else
+ error(_("could not delete references: %s"), err.buf);
+ }
+
+ ref_transaction_free(transaction);
+ strbuf_release(&err);
+ return ret;
}
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
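[Standalone illustration, not part of the patch: the merge loop at the heart of write_with_updates() above walks the sorted existing entries and the sorted updates in parallel, deciding per step whether to pass an old entry through, apply an update, or drop a deleted entry. This toy program (its ref names and values are made up) shows that three-way comparison; the old-value verification and the "update that changes nothing" case are omitted.]

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Existing (sorted) refs and (sorted) updates; NULL value = delete. */
	const char *existing[] = { "refs/heads/maint", "refs/heads/master", "refs/tags/v1.0" };
	const char *update_names[] = { "refs/heads/master", "refs/heads/next", "refs/tags/v1.0" };
	const char *update_values[] = { "new-value", "new-value", NULL };
	size_t n_existing = sizeof(existing) / sizeof(*existing);
	size_t n_updates = sizeof(update_names) / sizeof(*update_names);
	size_t e = 0, u = 0;

	while (e < n_existing || u < n_updates) {
		int cmp;

		if (u >= n_updates)
			cmp = -1;	/* only old entries left */
		else if (e >= n_existing)
			cmp = +1;	/* only updates left */
		else
			cmp = strcmp(existing[e], update_names[u]);

		if (!cmp) {
			/* Entry exists and is being updated or deleted. */
			if (update_values[u])
				printf("write %s (updated)\n", update_names[u]);
			else
				printf("drop  %s (deleted)\n", update_names[u]);
			e++, u++;
		} else if (cmp < 0) {
			/* No update for this entry: pass it through unchanged. */
			printf("write %s (kept)\n", existing[e]);
			e++;
		} else {
			/* Update for an entry that does not exist yet: create it. */
			if (update_values[u])
				printf("write %s (created)\n", update_names[u]);
			u++;
		}
	}
	return 0;
}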
diff --git a/refs/packed-backend.h b/refs/packed-backend.h
index 03b7c1de95..61687e408a 100644
--- a/refs/packed-backend.h
+++ b/refs/packed-backend.h
@@ -1,6 +1,15 @@
#ifndef REFS_PACKED_BACKEND_H
#define REFS_PACKED_BACKEND_H
+/*
+ * Support for storing references in a `packed-refs` file.
+ *
+ * Note that this backend doesn't check for D/F conflicts, because it
+ * doesn't care about them. But usually it should be wrapped in a
+ * `files_ref_store` that prevents D/F conflicts from being created,
+ * even among packed refs.
+ */
+
struct ref_store *packed_ref_store_create(const char *path,
unsigned int store_flags);
@@ -14,12 +23,4 @@ int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
void packed_refs_unlock(struct ref_store *ref_store);
int packed_refs_is_locked(struct ref_store *ref_store);
-void add_packed_ref(struct ref_store *ref_store,
- const char *refname, const struct object_id *oid);
-
-int commit_packed_refs(struct ref_store *ref_store, struct strbuf *err);
-
-int repack_without_refs(struct ref_store *ref_store,
- struct string_list *refnames, struct strbuf *err);
-
#endif /* REFS_PACKED_BACKEND_H */
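[Sketch, not part of the patch: with add_packed_ref(), commit_packed_refs() and repack_without_refs() removed from this header, callers now modify the packed store only through the generic ref-store API, the way files_delete_refs() does above. The helper name below is illustrative; packed_refs_lock()/packed_refs_unlock() and refs_delete_refs() are the calls shown in the diff, and the packed transaction honors a lock that is already held.]

static int delete_from_packed(struct ref_store *packed_ref_store,
			      const char *msg,
			      struct string_list *refnames,
			      unsigned int flags)
{
	struct strbuf err = STRBUF_INIT;
	int ret;

	if (packed_refs_lock(packed_ref_store, 0, &err)) {
		error("%s", err.buf);
		strbuf_release(&err);
		return -1;
	}

	/* Internally this runs a packed-refs transaction (see packed_delete_refs() above). */
	ret = refs_delete_refs(packed_ref_store, msg, refnames, flags);

	packed_refs_unlock(packed_ref_store);
	strbuf_release(&err);
	return ret;
}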
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index b02dc5a7e3..d7d344de73 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -242,6 +242,7 @@ struct ref_transaction {
size_t alloc;
size_t nr;
enum ref_transaction_state state;
+ void *backend_data;
};
/*
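[Sketch, not part of the patch: the pattern both backends above follow for the new ref_transaction::backend_data member. The struct and function names here are illustrative only; xcalloc() and REF_TRANSACTION_CLOSED are as used in the diff.]

struct example_backend_data {
	int some_private_state;
};

static void example_prepare(struct ref_transaction *transaction)
{
	struct example_backend_data *data = xcalloc(1, sizeof(*data));

	/* Stash backend-private state for later phases of the transaction. */
	transaction->backend_data = data;
}

static void example_cleanup(struct ref_transaction *transaction)
{
	struct example_backend_data *data = transaction->backend_data;

	if (data) {
		free(data);
		transaction->backend_data = NULL;
	}
	transaction->state = REF_TRANSACTION_CLOSED;
}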