 cbtree.h  |  2 +-
 hash.h    |  2 +-
 oidtree.c | 20 +++++++++++++++-----
 3 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/cbtree.h b/cbtree.h
index fe4587087e..a04a312c3f 100644
--- a/cbtree.h
+++ b/cbtree.h
@@ -25,7 +25,7 @@ struct cb_node {
 	 */
 	uint32_t byte;
 	uint8_t otherbits;
-	uint8_t k[FLEX_ARRAY]; /* arbitrary data */
+	uint8_t k[FLEX_ARRAY]; /* arbitrary data, unaligned */
 };
 
 struct cb_tree {
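
Why the key bytes are now flagged as unaligned: k[] begins immediately after a 4-byte and a 1-byte field, so its address carries no alignment guarantee beyond 1 byte. A minimal standalone sketch of the mismatch (struct key and struct node are illustrative stand-ins, not the git structs):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for struct object_id and struct cb_node. */
struct key {
	unsigned char hash[32];
	int algo;		/* the int member forces 4-byte alignment */
};

struct node {
	uint32_t byte;
	uint8_t otherbits;
	uint8_t k[];		/* starts right after the byte fields, only 1-byte aligned */
};

int main(void)
{
	printf("alignof(struct key) = %zu\n", (size_t)alignof(struct key));
	/*
	 * Casting node->k to (struct key *) and dereferencing it is
	 * undefined behaviour whenever the address is not 4-byte
	 * aligned; strict-alignment hardware can raise SIGBUS.
	 */
	return 0;
}
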
diff --git a/hash.h b/hash.h
index 27a180248f..9e25c40e9a 100644
--- a/hash.h
+++ b/hash.h
@@ -115,7 +115,7 @@ static inline void git_SHA256_Clone(git_SHA256_CTX *dst, const git_SHA256_CTX *src)
 
 struct object_id {
 	unsigned char hash[GIT_MAX_RAWSZ];
-	int algo;
+	int algo; /* XXX requires 4-byte alignment */
 };
 
 /* A suitably aligned type for stack allocations of hash contexts. */
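
The new XXX comment documents the assumption but does not check it anywhere. A hedged sketch (not part of this patch) of how the requirement could be pinned down at compile time with C11 _Static_assert; the GIT_MAX_RAWSZ value here is a stand-in for the real definition in hash.h:

#include <stdalign.h>

#define GIT_MAX_RAWSZ 32	/* stand-in; the real value lives in hash.h */

struct object_id {
	unsigned char hash[GIT_MAX_RAWSZ];
	int algo;		/* XXX requires 4-byte alignment */
};

/*
 * Encode the comment's assumption: the struct's alignment comes from
 * its int member and is expected to be 4 bytes on supported platforms.
 */
_Static_assert(alignof(struct object_id) == 4,
	       "struct object_id must be 4-byte aligned");
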
diff --git a/oidtree.c b/oidtree.c
index 580cab8ae2..0d39389bee 100644
--- a/oidtree.c
+++ b/oidtree.c
@@ -31,12 +31,19 @@ void oidtree_clear(struct oidtree *ot)
 void oidtree_insert(struct oidtree *ot, const struct object_id *oid)
 {
 	struct cb_node *on;
+	struct object_id k;
 
 	if (!oid->algo)
 		BUG("oidtree_insert requires oid->algo");
 
 	on = mem_pool_alloc(&ot->mem_pool, sizeof(*on) + sizeof(*oid));
-	oidcpy_with_padding((struct object_id *)on->k, oid);
+
+	/*
+	 * Clear the padding and copy the result in separate steps to
+	 * respect the 4-byte alignment needed by struct object_id.
+	 */
+	oidcpy_with_padding(&k, oid);
+	memcpy(on->k, &k, sizeof(k));
 
 	/*
 	 * n.b. Current callers won't get us duplicates, here. If a
@@ -68,17 +75,20 @@ int oidtree_contains(struct oidtree *ot, const struct object_id *oid)
 static enum cb_next iter(struct cb_node *n, void *arg)
 {
 	struct oidtree_iter_data *x = arg;
-	const struct object_id *oid = (const struct object_id *)n->k;
+	struct object_id k;
+
+	/* Copy to provide 4-byte alignment needed by struct object_id. */
+	memcpy(&k, n->k, sizeof(k));
 
-	if (x->algo != GIT_HASH_UNKNOWN && x->algo != oid->algo)
+	if (x->algo != GIT_HASH_UNKNOWN && x->algo != k.algo)
 		return CB_CONTINUE;
 
 	if (x->last_nibble_at) {
-		if ((oid->hash[*x->last_nibble_at] ^ x->last_byte) & 0xf0)
+		if ((k.hash[*x->last_nibble_at] ^ x->last_byte) & 0xf0)
 			return CB_CONTINUE;
 	}
 
-	return x->fn(oid, x->arg);
+	return x->fn(&k, x->arg);
 }
 
 void oidtree_each(struct oidtree *ot, const struct object_id *oid,
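
Taken together, the two oidtree.c hunks apply one pattern: never dereference a struct pointer cast from the unaligned k[] bytes; instead, go through a properly aligned stack variable with memcpy in both directions. A self-contained sketch of that pattern, with assumed stand-in names (struct key, store_key, load_key):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct key {
	unsigned char hash[32];
	int algo;
};

/* Write path: aligned stack copy -> unaligned byte storage. */
static void store_key(uint8_t *buf, const struct key *src)
{
	memcpy(buf, src, sizeof(*src));
}

/* Read path: unaligned byte storage -> aligned stack copy. */
static void load_key(struct key *dst, const uint8_t *buf)
{
	memcpy(dst, buf, sizeof(*dst));
}

int main(void)
{
	uint8_t storage[1 + sizeof(struct key)];
	struct key in = { .hash = { 0xab }, .algo = 2 }, out;

	/* +1: copy to/from an offset with no alignment guarantee */
	store_key(storage + 1, &in);
	load_key(&out, storage + 1);
	printf("algo = %d\n", out.algo);	/* 2, with no unaligned field access */
	return 0;
}
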