Diffstat (limited to 'refs/files-backend.c')
-rw-r--r--  refs/files-backend.c  55
1 file changed, 10 insertions, 45 deletions
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 4b46cd2e26..1b6141e718 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -641,43 +641,6 @@ out:
return ret;
}
-static int files_peel_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1)
-{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
- "peel_ref");
- int flag;
- unsigned char base[20];
-
- if (current_ref_iter && current_ref_iter->refname == refname) {
- struct object_id peeled;
-
- if (ref_iterator_peel(current_ref_iter, &peeled))
- return -1;
- hashcpy(sha1, peeled.hash);
- return 0;
- }
-
- if (refs_read_ref_full(ref_store, refname,
- RESOLVE_REF_READING, base, &flag))
- return -1;
-
- /*
- * If the reference is packed, read its ref_entry from the
- * cache in the hope that we already know its peeled value.
- * We only try this optimization on packed references because
- * (a) forcing the filling of the loose reference cache could
- * be expensive and (b) loose references anyway usually do not
- * have REF_KNOWS_PEELED.
- */
- if (flag & REF_ISPACKED &&
- !refs_peel_ref(refs->packed_ref_store, refname, sha1))
- return 0;
-
- return peel_object(base, sha1);
-}
-
struct files_ref_iterator {
struct ref_iterator base;
@@ -748,7 +711,7 @@ static struct ref_iterator *files_ref_iterator_begin(
const char *prefix, unsigned int flags)
{
struct files_ref_store *refs;
- struct ref_iterator *loose_iter, *packed_iter;
+ struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
struct files_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
@@ -758,10 +721,6 @@ static struct ref_iterator *files_ref_iterator_begin(
refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
- iter = xcalloc(1, sizeof(*iter));
- ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
-
/*
* We must make sure that all loose refs are read before
* accessing the packed-refs file; this avoids a race
@@ -797,7 +756,13 @@ static struct ref_iterator *files_ref_iterator_begin(
refs->packed_ref_store, prefix, 0,
DO_FOR_EACH_INCLUDE_BROKEN);
- iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
+ overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
+
+ iter = xcalloc(1, sizeof(*iter));
+ ref_iterator = &iter->base;
+ base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
+ overlay_iter->ordered);
+ iter->iter0 = overlay_iter;
iter->flags = flags;
return ref_iterator;
@@ -2094,7 +2059,7 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
struct ref_iterator *ref_iterator = &iter->base;
struct strbuf sb = STRBUF_INIT;
- base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
strbuf_addf(&sb, "%s/logs", gitdir);
iter->dir_iterator = dir_iterator_begin(sb.buf);
iter->ref_store = ref_store;
@@ -2138,6 +2103,7 @@ static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_st
return reflog_iterator_begin(ref_store, refs->gitcommondir);
} else {
return merge_ref_iterator_begin(
+ 0,
reflog_iterator_begin(ref_store, refs->gitdir),
reflog_iterator_begin(ref_store, refs->gitcommondir),
reflog_iterator_select, refs);
@@ -3089,7 +3055,6 @@ struct ref_storage_be refs_be_files = {
files_initial_transaction_commit,
files_pack_refs,
- files_peel_ref,
files_create_symref,
files_delete_refs,
files_rename_ref,
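
For reference, the initialization pattern the reworked files_ref_iterator_begin() follows above — build the underlying overlay iterator first, then forward its ordered flag when initializing the wrapping iterator's base — can be sketched in isolation roughly as below. This is a minimal illustration based only on the calls visible in this diff; the my_ref_iterator names and the empty vtable are hypothetical stand-ins, not part of the patch, and the real declarations live in git's refs/refs-internal.h and cache.h.

#include "cache.h"               /* xcalloc() */
#include "refs/refs-internal.h"  /* struct ref_iterator, base_ref_iterator_init(), ... */

/* Hypothetical vtable; a real backend fills in its advance/peel/abort callbacks. */
static struct ref_iterator_vtable my_ref_iterator_vtable;

struct my_ref_iterator {
	struct ref_iterator base;   /* embedded base; must be the first member */
	struct ref_iterator *iter0; /* underlying iterator we delegate to */
	unsigned int flags;
};

static struct ref_iterator *my_ref_iterator_begin(
		struct ref_iterator *loose_iter,
		struct ref_iterator *packed_iter,
		unsigned int flags)
{
	struct my_ref_iterator *iter;
	struct ref_iterator *ref_iterator, *overlay_iter;

	/*
	 * Create the underlying iterator first, so that its ordered
	 * flag is already known when the base is initialized.
	 */
	overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &my_ref_iterator_vtable,
			       overlay_iter->ordered);

	iter->iter0 = overlay_iter;
	iter->flags = flags;

	return ref_iterator;
}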