Diffstat (limited to 'object-store.h')
-rw-r--r--  object-store.h | 77
 1 file changed, 71 insertions(+), 6 deletions(-)
diff --git a/object-store.h b/object-store.h
index 7f7b3cdd80..d1e490f203 100644
--- a/object-store.h
+++ b/object-store.h
@@ -4,8 +4,9 @@
#include "cache.h"
#include "oidmap.h"
#include "list.h"
-#include "sha1-array.h"
+#include "oid-array.h"
#include "strbuf.h"
+#include "thread-utils.h"
struct object_directory {
struct object_directory *next;
@@ -60,6 +61,7 @@ struct oid_array *odb_loose_cache(struct object_directory *odb,
void odb_clear_loose_cache(struct object_directory *odb);
struct packed_git {
+ struct hashmap_entry packmap_ent;
struct packed_git *next;
struct list_head mru;
struct pack_window *windows;
@@ -88,6 +90,20 @@ struct packed_git {
struct multi_pack_index;
+static inline int pack_map_entry_cmp(const void *unused_cmp_data,
+ const struct hashmap_entry *entry,
+ const struct hashmap_entry *entry2,
+ const void *keydata)
+{
+ const char *key = keydata;
+ const struct packed_git *pg1, *pg2;
+
+ pg1 = container_of(entry, const struct packed_git, packmap_ent);
+ pg2 = container_of(entry2, const struct packed_git, packmap_ent);
+
+ return strcmp(pg1->pack_name, key ? key : pg2->pack_name);
+}
+
struct raw_object_store {
/*
* Set of all object directories; the main directory is first (and
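
How the comparison callback above plugs into Git's hashmap API (hashmap.h):
packs hash and compare by pack_name, and keydata lets callers probe the map
with a bare file name rather than a fully formed entry. A minimal sketch
under those assumptions; register_pack() and find_pack_by_name() are
illustrative names (the real calls live in packfile.c), and the map is
assumed to have been set up with hashmap_init(&map, pack_map_entry_cmp,
NULL, 0):

	/* Register a pack, hashing its file name into the embedded entry. */
	static void register_pack(struct hashmap *pack_map, struct packed_git *p)
	{
		hashmap_entry_init(&p->packmap_ent, strhash(p->pack_name));
		hashmap_add(pack_map, &p->packmap_ent);
	}

	/*
	 * Probe by file name: the name travels as keydata, so
	 * pack_map_entry_cmp() takes its "key ? key : ..." branch.
	 */
	static struct packed_git *find_pack_by_name(struct hashmap *pack_map,
						    const char *pack_name)
	{
		struct hashmap_entry lookup, *found;

		hashmap_entry_init(&lookup, strhash(pack_name));
		found = hashmap_get(pack_map, &lookup, pack_name);
		if (!found)
			return NULL;
		return container_of(found, struct packed_git, packmap_ent);
	}
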
@@ -110,6 +126,8 @@ struct raw_object_store {
* (see git-replace(1)).
*/
struct oidmap *replace_map;
+ unsigned replace_map_initialized : 1;
+ pthread_mutex_t replace_mutex; /* protect object replace functions */
struct commit_graph *commit_graph;
unsigned commit_graph_attempted : 1; /* if loading has been attempted */
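
The new replace_map_initialized bit and replace_mutex pair up for guarded
lazy initialization of the replace map. A hedged sketch of the
double-checked pattern they suggest; the function body here is illustrative
(the real logic lives in replace-object.c):

	static void prepare_replace_object(struct repository *r)
	{
		if (r->objects->replace_map_initialized)
			return;	/* fast path, no lock taken */

		pthread_mutex_lock(&r->objects->replace_mutex);
		if (!r->objects->replace_map_initialized) {
			r->objects->replace_map =
				xmalloc(sizeof(*r->objects->replace_map));
			oidmap_init(r->objects->replace_map, 0);
			/* ... walk refs/replace/ and fill the map ... */
			r->objects->replace_map_initialized = 1;
		}
		pthread_mutex_unlock(&r->objects->replace_mutex);
	}
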
@@ -132,6 +150,12 @@ struct raw_object_store {
struct list_head packed_git_mru;
/*
+ * A map of packfiles to packed_git structs for tracking which
+ * packs have been loaded already.
+ */
+ struct hashmap pack_map;
+
+ /*
* A fast, rough count of the number of objects in the repository.
* These two fields are not meant for direct access. Use
* approximate_object_count() instead.
@@ -177,8 +201,9 @@ static inline void *repo_read_object_file(struct repository *r,
/* Read and unpack an object file into memory, write memory to an object file */
int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
-int hash_object_file(const void *buf, unsigned long len,
- const char *type, struct object_id *oid);
+int hash_object_file(const struct git_hash_algo *algo, const void *buf,
+ unsigned long len, const char *type,
+ struct object_id *oid);
int write_object_file(const void *buf, unsigned long len,
const char *type, struct object_id *oid);
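
The extra algo parameter makes the hash algorithm an explicit choice of the
caller rather than an implicit global. A small sketch of the updated
calling convention, assuming the usual the_hash_algo and hash_algos
definitions from Git's hash machinery:

	static void hash_blob_both_ways(const char *buf, unsigned long len)
	{
		struct object_id oid;

		/* Hash with the repository's configured algorithm. */
		hash_object_file(the_hash_algo, buf, len, "blob", &oid);

		/* Or pin one explicitly, e.g. SHA-256. */
		hash_object_file(&hash_algos[GIT_HASH_SHA256], buf, len,
				 "blob", &oid);
	}
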
@@ -187,6 +212,14 @@ int hash_object_file_literally(const void *buf, unsigned long len,
const char *type, struct object_id *oid,
unsigned flags);
+/*
+ * Add an object file to the in-memory object store, without writing it
+ * to disk.
+ *
+ * Callers are responsible for calling write_object_file to record the
+ * object in persistent storage before writing any other new objects
+ * that reference it.
+ */
int pretend_object_file(void *, unsigned long, enum object_type,
struct object_id *oid);
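
To make the comment's contract concrete, a hedged sketch of the intended
call order (the demo name is illustrative): fabricate the object in memory,
then persist it with write_object_file() before anything else references it.

	static void add_fake_then_persist(struct object_id *oid)
	{
		char *buf = xstrdup("in-memory only, for now\n");
		unsigned long len = strlen(buf);

		/* Readable through the normal object-access paths,
		 * but nothing is written to disk yet. */
		pretend_object_file(buf, len, OBJ_BLOB, oid);

		/* Record it durably before writing new objects that
		 * refer to it, as the comment above requires. */
		write_object_file(buf, len, "blob", oid);
	}
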
@@ -228,12 +261,46 @@ int has_loose_object_nonlocal(const struct object_id *);
void assert_oid_type(const struct object_id *oid, enum object_type expect);
+/*
+ * Enabling the object read lock allows multiple threads to safely call the
+ * following functions in parallel: repo_read_object_file(), read_object_file(),
+ * read_object_file_extended(), read_object_with_reference(), read_object(),
+ * oid_object_info() and oid_object_info_extended().
+ *
+ * obj_read_lock() and obj_read_unlock() may also be used to protect other
+ * sections which cannot execute in parallel with object reading. Since the
+ * lock used is a recursive mutex, these sections can even contain calls to
+ * object reading functions. However, beware that in these cases zlib
+ * inflation won't be performed in parallel, losing performance.
+ *
+ * TODO: oid_object_info_extended()'s call stack can recurse: if any of its
+ * callees ends up calling it again, the recursive call won't benefit from
+ * parallel inflation.
+ */
+void enable_obj_read_lock(void);
+void disable_obj_read_lock(void);
+
+extern int obj_read_use_lock;
+extern pthread_mutex_t obj_read_mutex;
+
+static inline void obj_read_lock(void)
+{
+ if (obj_read_use_lock)
+ pthread_mutex_lock(&obj_read_mutex);
+}
+
+static inline void obj_read_unlock(void)
+{
+ if (obj_read_use_lock)
+ pthread_mutex_unlock(&obj_read_mutex);
+}
+
struct object_info {
/* Request */
enum object_type *typep;
unsigned long *sizep;
off_t *disk_sizep;
- unsigned char *delta_base_sha1;
+ struct object_id *delta_base_oid;
struct strbuf *type_name;
void **contentp;
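
A hedged sketch of how a threaded caller would use this machinery: enable
the lock once before spawning threads, let workers read objects in
parallel, and wrap any section that must not overlap object reading in
obj_read_lock()/obj_read_unlock(). The worker/run_worker names and the
single-thread bookkeeping are illustrative, not part of this API.

	static void *worker(void *data)
	{
		const struct object_id *oid = data;
		enum object_type type;
		unsigned long size;
		void *buf;

		/* Concurrent reads are safe once the lock is enabled. */
		buf = repo_read_object_file(the_repository, oid, &type, &size);
		free(buf);

		/*
		 * Sections that must not run alongside object reading can
		 * take the same lock; since it is recursive, they may even
		 * call the reading functions themselves.
		 */
		obj_read_lock();
		/* ... touch state shared with the object readers ... */
		obj_read_unlock();
		return NULL;
	}

	static void run_worker(struct object_id *oid)
	{
		pthread_t tid;

		enable_obj_read_lock(); /* once, before creating threads */
		pthread_create(&tid, NULL, worker, oid);
		pthread_join(tid, NULL);
		disable_obj_read_lock();
	}
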
@@ -271,8 +338,6 @@ struct object_info {
#define OBJECT_INFO_LOOKUP_REPLACE 1
/* Allow reading from a loose object file of unknown/bogus type */
#define OBJECT_INFO_ALLOW_UNKNOWN_TYPE 2
-/* Do not check cached storage */
-#define OBJECT_INFO_SKIP_CACHED 4
/* Do not retry packed storage after checking packed and loose storage */
#define OBJECT_INFO_QUICK 8
/* Do not check loose object */
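
Tying the object_info change and the flag table together: delta-base
queries now come back as a struct object_id instead of a raw SHA-1 buffer,
and with OBJECT_INFO_SKIP_CACHED gone, cached storage is always consulted.
A hedged sketch of a query with the remaining flags (peek_object is an
illustrative name):

	static int peek_object(struct repository *r, const struct object_id *oid)
	{
		struct object_id delta_base;
		unsigned long size;
		struct object_info oi = OBJECT_INFO_INIT;

		oi.sizep = &size;
		oi.delta_base_oid = &delta_base;	/* formerly delta_base_sha1 */

		/* QUICK: don't re-scan the pack directory on a miss. */
		return oid_object_info_extended(r, oid, &oi,
						OBJECT_INFO_QUICK |
						OBJECT_INFO_LOOKUP_REPLACE);
	}
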