From 3733e6946465d4a3a1d89026a5ec911d3af339ab Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Mon, 22 Feb 2016 17:44:28 -0500
Subject: use xmallocz to avoid size arithmetic

We frequently allocate strings as xmalloc(len + 1), where the extra 1
is for the NUL terminator. This can be done more simply with xmallocz,
which also checks for integer overflow.

There's no case where switching xmalloc(n+1) to xmallocz(n) is wrong;
the result is the same length, and malloc made no guarantees about
what was in the buffer anyway. But in some cases, we can stop manually
placing NUL at the end of the allocated buffer. But that's only safe
if it's clear that the contents will always fill the buffer.

In each case where this patch does so, I manually examined the control
flow, and I tried to err on the side of caution.

Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 refs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'refs.c')

diff --git a/refs.c b/refs.c
index e2d34b253e..1d9e2a7932 100644
--- a/refs.c
+++ b/refs.c
@@ -124,7 +124,7 @@ int refname_is_safe(const char *refname)
 		char *buf;
 		int result;
 
-		buf = xmalloc(strlen(refname) + 1);
+		buf = xmallocz(strlen(refname));
 		/*
 		 * Does the refname try to escape refs/?
 		 * For example: refs/foo/../bar is safe but refs/foo/../../bar

From 96ffc06f72f693d80f05059a1f0e5ca9007d5f1b Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Mon, 22 Feb 2016 17:44:32 -0500
Subject: convert trivial cases to FLEX_ARRAY macros

Using FLEX_ARRAY macros reduces the amount of manual size computation
we have to do. It also ensures we don't overflow size_t, and it makes
sure we write the same number of bytes that we allocated.

Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 refs.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'refs.c')

diff --git a/refs.c b/refs.c
index 1d9e2a7932..2d86445231 100644
--- a/refs.c
+++ b/refs.c
@@ -761,10 +761,8 @@ void ref_transaction_free(struct ref_transaction *transaction)
 static struct ref_update *add_update(struct ref_transaction *transaction,
				     const char *refname)
 {
-	size_t len = strlen(refname) + 1;
-	struct ref_update *update = xcalloc(1, sizeof(*update) + len);
-
-	memcpy((char *)update->refname, refname, len); /* includes NUL */
+	struct ref_update *update;
+	FLEX_ALLOC_STR(update, refname, refname);
 	ALLOC_GROW(transaction->updates, transaction->nr + 1, transaction->alloc);
 	transaction->updates[transaction->nr++] = update;
 	return update;

From 50a6c8efa2bbeddf46ca34c7765024108202e04b Mon Sep 17 00:00:00 2001
From: Jeff King
Date: Mon, 22 Feb 2016 17:44:35 -0500
Subject: use st_add and st_mult for allocation size computation

If our size computation overflows size_t, we may allocate a much
smaller buffer than we expected and overflow it. It's probably
impossible to trigger an overflow in most of these sites in practice,
but it is easy enough to convert their additions and multiplications
into overflow-checking variants. This may be fixing real bugs, and it
makes auditing the code easier.
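For illustration, here is a minimal standalone sketch of the idea behind
these overflow-checking helpers. Git's real st_add() and st_mult() live in
git-compat-util.h and die() when a result would not fit in a size_t; the
sketch below just prints an error and exits, and the values in main() are
made up purely to mirror the shape of the allocation changed below.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Overflow-checked size arithmetic in the spirit of st_add()/st_mult().
 * These are simplified stand-ins, not git's actual helpers.
 */
static size_t st_add_sketch(size_t a, size_t b)
{
	if (b > SIZE_MAX - a) {		/* would a + b wrap around? */
		fprintf(stderr, "fatal: size_t overflow in addition\n");
		exit(128);
	}
	return a + b;
}

static size_t st_mult_sketch(size_t a, size_t b)
{
	if (a && b > SIZE_MAX / a) {	/* would a * b wrap around? */
		fprintf(stderr, "fatal: size_t overflow in multiplication\n");
		exit(128);
	}
	return a * b;
}

int main(void)
{
	/* same shape as the scanf_fmts allocation in the hunk below */
	size_t nr_rules = 6;		/* illustrative values only */
	size_t total_len = 100;
	size_t n = st_add_sketch(st_mult_sketch(nr_rules, sizeof(char *)),
				 total_len);
	char *buf = malloc(n);

	printf("allocating %zu bytes\n", n);
	free(buf);
	return 0;
}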
Signed-off-by: Jeff King
Signed-off-by: Junio C Hamano
---
 refs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'refs.c')

diff --git a/refs.c b/refs.c
index 2d86445231..b0e6ece6f4 100644
--- a/refs.c
+++ b/refs.c
@@ -906,7 +906,7 @@ char *shorten_unambiguous_ref(const char *refname, int strict)
 		/* -2 for strlen("%.*s") - strlen("%s"); +1 for NUL */
 		total_len += strlen(ref_rev_parse_rules[nr_rules]) - 2 + 1;
 
-	scanf_fmts = xmalloc(nr_rules * sizeof(char *) + total_len);
+	scanf_fmts = xmalloc(st_add(st_mult(nr_rules, sizeof(char *)), total_len));
 
 	offset = 0;
 	for (i = 0; i < nr_rules; i++) {
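Taken together, the series replaces open-coded size arithmetic with helpers
that either terminate the buffer for the caller (xmallocz), size and fill a
trailing flexible array in one allocation (FLEX_ALLOC_STR), or refuse to
overflow (st_add/st_mult). The sketch below is a simplified, standalone
illustration of the first two idioms, not git's actual implementations; the
real helpers also guard against size_t overflow and die() when allocation
fails.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Simplified stand-ins for the idioms used in the first two patches.
 * git's xmallocz() and FLEX_ALLOC_STR() additionally check for size_t
 * overflow and die() on allocation failure.
 */
static void *xmallocz_sketch(size_t len)
{
	char *buf = malloc(len + 1);	/* one extra byte for the NUL */

	if (!buf) {
		perror("malloc");
		exit(1);
	}
	buf[len] = '\0';		/* terminator placed by the allocator */
	return buf;
}

/* a struct ending in a flexible array member, like struct ref_update */
struct update_sketch {
	int flags;
	char refname[];
};

static struct update_sketch *alloc_update_sketch(const char *refname)
{
	size_t len = strlen(refname);
	struct update_sketch *u = calloc(1, sizeof(*u) + len + 1);

	if (!u) {
		perror("calloc");
		exit(1);
	}
	memcpy(u->refname, refname, len + 1);	/* copy includes the NUL */
	return u;
}

int main(void)
{
	char *copy = xmallocz_sketch(strlen("refs/heads/master"));
	struct update_sketch *u = alloc_update_sketch("refs/heads/master");

	/* no need to write the NUL ourselves; the allocator already did */
	memcpy(copy, "refs/heads/master", strlen("refs/heads/master"));
	printf("%s / %s\n", copy, u->refname);
	free(copy);
	free(u);
	return 0;
}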