author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-11-13 16:36:30 -0800
---|---|---
committer | Junio C Hamano <gitster@pobox.com> | 2008-11-14 19:11:49 -0800
commit | 671c9b7e315db89081cc69f83a8f405e4aca37bc (patch) |
tree | 0410fb1af855fc65a7d96dd15410db16e41cbc9f /preload-index.c |
parent | Merge git://git.bogomips.org/git-svn (diff) |
Add cache preload facility
This can do the lstat() storm in parallel, giving potentially much
improved performance for cold-cache cases or things like NFS that have
weak metadata caching.
Just use "read_cache_preload()" instead of "read_cache()" to force an
optimistic preload of the index stat data. The function takes a
pathspec as its argument, allowing us to preload only the relevant
portion of the index.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
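As a usage illustration (not part of this commit), the sketch below shows how a caller might switch from read_cache() to read_cache_preload(). It assumes read_cache_preload(pathspec) is a convenience wrapper around read_index_preload(&the_index, pathspec), mirroring how read_cache() wraps read_index(); the surrounding function is hypothetical.

```c
/*
 * Hedged usage sketch (not from this diff): a builtin switching from
 * read_cache() to read_cache_preload().  Assumes read_cache_preload(pathspec)
 * wraps read_index_preload(&the_index, pathspec) the same way read_cache()
 * wraps read_index(); gather_status() itself is made up for illustration.
 */
#include "cache.h"

static int gather_status(const char **pathspec)
{
	/* Before: read_cache() alone; every entry is lstat()ed serially later. */
	/* After: the same index read, plus an optimistic parallel lstat() pass
	 * restricted to the paths we are actually going to look at. */
	if (read_cache_preload(pathspec) < 0)
		return error("index file corrupt");

	/*
	 * Entries the preload threads found unchanged are already marked
	 * up to date, so later refresh/diff code can skip their lstat().
	 */
	return 0;
}
```

Because the preload only marks entries it could positively verify, a failed lstat() or a stat mismatch changes nothing semantically; those entries simply fall back to the normal per-entry check.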
Diffstat (limited to 'preload-index.c')
-rw-r--r-- | preload-index.c | 91
1 files changed, 91 insertions, 0 deletions
```diff
diff --git a/preload-index.c b/preload-index.c
new file mode 100644
index 0000000000..6253578c96
--- /dev/null
+++ b/preload-index.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2008 Linus Torvalds
+ */
+#include "cache.h"
+#include <pthread.h>
+
+/*
+ * Mostly randomly chosen maximum thread counts: we
+ * cap the parallelism to 20 threads, and we want
+ * to have at least 500 lstat's per thread for it to
+ * be worth starting a thread.
+ */
+#define MAX_PARALLEL (20)
+#define THREAD_COST (500)
+
+struct thread_data {
+	pthread_t pthread;
+	struct index_state *index;
+	const char **pathspec;
+	int offset, nr;
+};
+
+static void *preload_thread(void *_data)
+{
+	int nr;
+	struct thread_data *p = _data;
+	struct index_state *index = p->index;
+	struct cache_entry **cep = index->cache + p->offset;
+
+	nr = p->nr;
+	if (nr + p->offset > index->cache_nr)
+		nr = index->cache_nr - p->offset;
+
+	do {
+		struct cache_entry *ce = *cep++;
+		struct stat st;
+
+		if (ce_stage(ce))
+			continue;
+		if (ce_uptodate(ce))
+			continue;
+		if (!ce_path_match(ce, p->pathspec))
+			continue;
+		if (lstat(ce->name, &st))
+			continue;
+		if (ie_match_stat(index, ce, &st, 0))
+			continue;
+		ce_mark_uptodate(ce);
+	} while (--nr > 0);
+	return NULL;
+}
+
+static void preload_index(struct index_state *index, const char **pathspec)
+{
+	int threads, i, work, offset;
+	struct thread_data data[MAX_PARALLEL];
+
+	if (!core_preload_index)
+		return;
+
+	threads = index->cache_nr / THREAD_COST;
+	if (threads < 2)
+		return;
+	if (threads > MAX_PARALLEL)
+		threads = MAX_PARALLEL;
+	offset = 0;
+	work = (index->cache_nr + threads - 1) / threads;
+	for (i = 0; i < threads; i++) {
+		struct thread_data *p = data+i;
+		p->index = index;
+		p->pathspec = pathspec;
+		p->offset = offset;
+		p->nr = work;
+		offset += work;
+		if (pthread_create(&p->pthread, NULL, preload_thread, p))
+			die("unable to create threaded lstat");
+	}
+	for (i = 0; i < threads; i++) {
+		struct thread_data *p = data+i;
+		if (pthread_join(p->pthread, NULL))
+			die("unable to join threaded lstat");
+	}
+}
+
+int read_index_preload(struct index_state *index, const char **pathspec)
+{
+	int retval = read_index(index);
+
+	preload_index(index, pathspec);
+	return retval;
+}
```
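For reference, a standalone sketch of the work-partitioning arithmetic in preload_index() above. MAX_PARALLEL and THREAD_COST are the values from the diff; the index size of 30,000 entries is hypothetical.

```c
/*
 * Standalone sketch of the partitioning arithmetic in preload_index(),
 * using a hypothetical index size; not part of the diff itself.
 */
#include <stdio.h>

#define MAX_PARALLEL 20
#define THREAD_COST 500

int main(void)
{
	unsigned int cache_nr = 30000;		/* hypothetical number of cache entries */
	int threads = cache_nr / THREAD_COST;	/* 60 candidate threads */

	if (threads < 2)
		return 0;			/* too small to be worth spawning threads */
	if (threads > MAX_PARALLEL)
		threads = MAX_PARALLEL;		/* capped at 20 */

	/*
	 * Ceiling division so threads * work >= cache_nr; preload_thread()
	 * clamps its own slice so the last thread never reads past cache_nr.
	 */
	int work = (cache_nr + threads - 1) / threads;

	printf("%u entries -> %d threads x %d entries each\n",
	       cache_nr, threads, work);
	return 0;
}
```

With these numbers, 30000 / 500 = 60 candidate threads get capped to 20, and each thread is handed 1500 entries; when cache_nr does not divide evenly, the clamp at the top of preload_thread() trims the final slice.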