Reworked maybe_grow to take a single ptr, renamed to afl_realloc (#505)

* maybe_grow takes a single ptr

* fixed use_deflate

* reworked maybe_grow_bufsize

* helper to access underlying buf

* remove redundant realloc_block

* code format

* fixes

* added unit tests

* renamed maybe_grow to afl_realloc

* BUF_PARAMS -> AFL_BUF_PARAM
diff --git a/custom_mutators/radamsa/custom_mutator_helpers.h b/custom_mutators/radamsa/custom_mutator_helpers.h
index 0848321..e23c0b6 100644
--- a/custom_mutators/radamsa/custom_mutator_helpers.h
+++ b/custom_mutators/radamsa/custom_mutator_helpers.h
@@ -324,7 +324,7 @@
 }
 
 /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */
-static inline void swap_bufs(void **buf1, size_t *size1, void **buf2,
+static inline void afl_swap_bufs(void **buf1, size_t *size1, void **buf2,
                              size_t *size2) {
 
   void * scratch_buf = *buf1;
diff --git a/examples/afl_network_proxy/afl-network-server.c b/examples/afl_network_proxy/afl-network-server.c
index ab7874f..c70fd47 100644
--- a/examples/afl_network_proxy/afl-network-server.c
+++ b/examples/afl_network_proxy/afl-network-server.c
@@ -73,9 +73,8 @@
 static u8 *in_data;                    /* Input data for trimming           */
 static u8 *buf2;
 
-static s32    in_len;
-static u32    map_size = MAP_SIZE;
-static size_t buf2_len;
+static s32 in_len;
+static u32 map_size = MAP_SIZE;
 
 static volatile u8 stop_soon;          /* Ctrl-C pressed?                   */
 
@@ -272,7 +271,7 @@
 
       setenv("QEMU_SET_ENV", buf, 1);
 
-      ck_free(buf);
+      afl_free(buf);
 
     } else {
 
@@ -343,7 +342,7 @@
 
 }
 
-int recv_testcase(int s, void **buf, size_t *max_len) {
+int recv_testcase(int s, void **buf) {
 
   u32    size;
   s32    ret;
@@ -358,7 +357,8 @@
 
   if ((size & 0xff000000) != 0xff000000) {
 
-    *buf = ck_maybe_grow(buf, max_len, size);
+    *buf = afl_realloc((void **)&buf, size);
+    if (unlikely(!buf)) { PFATAL("Alloc"); }
     received = 0;
     // fprintf(stderr, "unCOMPRESS (%u)\n", size);
     while (received < size &&
@@ -370,7 +370,8 @@
 #ifdef USE_DEFLATE
     u32 clen;
     size -= 0xff000000;
-    *buf = ck_maybe_grow(buf, max_len, size);
+    *buf = afl_realloc((void **)&buf, size);
+    if (unlikely(!buf)) { PFATAL("Alloc"); }
     received = 0;
     while (received < 4 &&
            (ret = recv(s, &clen + received, 4 - received, 0)) > 0)
@@ -379,15 +380,15 @@
     // fprintf(stderr, "received clen information of %d\n", clen);
     if (clen < 1)
       FATAL("did not receive valid compressed len information: %u", clen);
-    buf2 = ck_maybe_grow((void **)&buf2, &buf2_len, clen);
+    buf2 = afl_realloc((void **)&buf2, clen);
+    if (unlikely(!buf2)) { PFATAL("Alloc"); }
     received = 0;
     while (received < clen &&
            (ret = recv(s, buf2 + received, clen - received, 0)) > 0)
       received += ret;
     if (received != clen) FATAL("did not receive compressed information");
     if (libdeflate_deflate_decompress(decompressor, buf2, clen, (char *)*buf,
-                                      *max_len,
-                                      &received) != LIBDEFLATE_SUCCESS)
+                                      size, &received) != LIBDEFLATE_SUCCESS)
       FATAL("decompression failed");
       // fprintf(stderr, "DECOMPRESS (%u->%u):\n", clen, received);
       // for (u32 i = 0; i < clen; i++) fprintf(stderr, "%02x", buf2[i]);
@@ -413,7 +414,6 @@
 int main(int argc, char **argv_orig, char **envp) {
 
   s32    opt, s, sock, on = 1, port = -1;
-  size_t max_len = 0;
   u8     mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0;
   char **use_argv;
   struct sockaddr_in6 serveraddr, clientaddr;
@@ -568,7 +568,8 @@
   sharedmem_t shm = {0};
   fsrv->trace_bits = afl_shm_init(&shm, map_size, 0);
 
-  in_data = ck_maybe_grow((void **)&in_data, &max_len, 65536);
+  in_data = afl_realloc((void **)&in_data, 65536);
+  if (unlikely(!in_data)) { PFATAL("Alloc"); }
 
   atexit(at_exit_handler);
   setup_signal_handlers();
@@ -639,7 +640,8 @@
 #ifdef USE_DEFLATE
   compressor = libdeflate_alloc_compressor(1);
   decompressor = libdeflate_alloc_decompressor();
-  buf2 = ck_maybe_grow((void **)&buf2, &buf2_len, map_size + 16);
+  buf2 = afl_realloc((void **)&buf2, map_size + 16);
+  if (unlikely(!buf2)) { PFATAL("alloc"); }
   lenptr = (u32 *)(buf2 + 4);
   fprintf(stderr, "Compiled with compression support\n");
 #endif
@@ -664,7 +666,7 @@
 
 #endif
 
-  while ((in_len = recv_testcase(s, (void **)&in_data, &max_len)) > 0) {
+  while ((in_len = recv_testcase(s, (void **)&in_data)) > 0) {
 
     // fprintf(stderr, "received %u\n", in_len);
     (void)run_target(fsrv, use_argv, in_data, in_len, 1);
@@ -697,9 +699,9 @@
   afl_shm_deinit(&shm);
   afl_fsrv_deinit(fsrv);
   if (fsrv->target_path) { ck_free(fsrv->target_path); }
-  if (in_data) { ck_free(in_data); }
+  afl_free(in_data);
 #if USE_DEFLATE
-  if (buf2) { ck_free(buf2); }
+  afl_free(buf2);
   libdeflate_free_compressor(compressor);
   libdeflate_free_decompressor(decompressor);
 #endif
diff --git a/examples/custom_mutators/custom_mutator_helpers.h b/examples/custom_mutators/custom_mutator_helpers.h
index 0848321..ad5acb0 100644
--- a/examples/custom_mutators/custom_mutator_helpers.h
+++ b/examples/custom_mutators/custom_mutator_helpers.h
@@ -324,8 +324,8 @@
 }
 
 /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */
-static inline void swap_bufs(void **buf1, size_t *size1, void **buf2,
-                             size_t *size2) {
+static inline void afl_swap_bufs(void **buf1, size_t *size1, void **buf2,
+                                 size_t *size2) {
 
   void * scratch_buf = *buf1;
   size_t scratch_size = *size1;
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index ca7d10f..dca395a 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -126,6 +126,9 @@
 
 #define STAGE_BUF_SIZE (64)  /* usable size for stage name buf in afl_state */
 
+// Little helper to access the ptr to afl->##name_buf - for use in afl_realloc.
+#define AFL_BUF_PARAM(name) ((void **)&afl->name##_buf)
+
 extern s8  interesting_8[INTERESTING_8_LEN];
 extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
 extern s32
@@ -572,7 +575,6 @@
 
   // growing buf
   struct queue_entry **queue_buf;
-  size_t               queue_size;
 
   struct queue_entry **top_rated;           /* Top entries for bitmap bytes */
 
@@ -633,24 +635,18 @@
 
   /*needed for afl_fuzz_one */
   // TODO: see which we can reuse
-  u8 *   out_buf;
-  size_t out_size;
+  u8 *out_buf;
 
-  u8 *   out_scratch_buf;
-  size_t out_scratch_size;
+  u8 *out_scratch_buf;
 
-  u8 *   eff_buf;
-  size_t eff_size;
+  u8 *eff_buf;
 
-  u8 *   in_buf;
-  size_t in_size;
+  u8 *in_buf;
 
-  u8 *   in_scratch_buf;
-  size_t in_scratch_size;
+  u8 *in_scratch_buf;
 
-  u8 *   ex_buf;
-  size_t ex_size;
-  u32    custom_mutators_count;
+  u8 *ex_buf;
+  u32 custom_mutators_count;
 
   list_t custom_mutator_list;
 
@@ -666,7 +662,6 @@
   char *      name_short;
   void *      dh;
   u8 *        post_process_buf;
-  size_t      post_process_size;
   u8          stacked_custom_prob, stacked_custom;
 
   void *data;                                    /* custom mutator data ptr */
diff --git a/include/alloc-inl.h b/include/alloc-inl.h
index 306cc62..90701d1 100644
--- a/include/alloc-inl.h
+++ b/include/alloc-inl.h
@@ -30,12 +30,13 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <stddef.h>
 
 #include "config.h"
 #include "types.h"
 #include "debug.h"
 
-/* Initial size used for ck_maybe_grow */
+/* Initial size used for afl_realloc */
 #define INITIAL_GROWTH_SIZE (64)
 
 // Be careful! _WANT_ORIGINAL_AFL_ALLOC is not compatible with custom mutators
@@ -76,10 +77,6 @@
                                                                         \
     } while (0)
 
-  /* Allocator increments for ck_realloc_block(). */
-
-  #define ALLOC_BLK_INC 256
-
 /* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized
    requests. */
 
@@ -149,15 +146,6 @@
 
 }
 
-/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up
-   repeated small reallocs without complicating the user code). */
-
-static inline void *DFL_ck_realloc_block(void *orig, u32 size) {
-
-  return DFL_ck_realloc(orig, size);
-
-}
-
 /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */
 
 static inline u8 *DFL_ck_strdup(u8 *str) {
@@ -183,7 +171,6 @@
   #define ck_alloc DFL_ck_alloc
   #define ck_alloc_nozero DFL_ck_alloc_nozero
   #define ck_realloc DFL_ck_realloc
-  #define ck_realloc_block DFL_ck_realloc_block
   #define ck_strdup DFL_ck_strdup
   #define ck_free DFL_ck_free
 
@@ -239,10 +226,6 @@
   #define ALLOC_OFF_HEAD 8
   #define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1)
 
-  /* Allocator increments for ck_realloc_block(). */
-
-  #define ALLOC_BLK_INC 256
-
   /* Sanity-checking macros for pointers. */
 
   #define CHECK_PTR(_p)                            \
@@ -402,29 +385,6 @@
 
 }
 
-/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up
-   repeated small reallocs without complicating the user code). */
-
-static inline void *DFL_ck_realloc_block(void *orig, u32 size) {
-
-  #ifndef DEBUG_BUILD
-
-  if (orig) {
-
-    CHECK_PTR(orig);
-
-    if (ALLOC_S(orig) >= size) return orig;
-
-    size += ALLOC_BLK_INC;
-
-  }
-
-  #endif                                                    /* !DEBUG_BUILD */
-
-  return DFL_ck_realloc(orig, size);
-
-}
-
 /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */
 
 static inline u8 *DFL_ck_strdup(u8 *str) {
@@ -458,7 +418,6 @@
     #define ck_alloc DFL_ck_alloc
     #define ck_alloc_nozero DFL_ck_alloc_nozero
     #define ck_realloc DFL_ck_realloc
-    #define ck_realloc_block DFL_ck_realloc_block
     #define ck_strdup DFL_ck_strdup
     #define ck_free DFL_ck_free
 
@@ -528,8 +487,8 @@
 
   /* No space available - allocate more. */
 
-  TRK[bucket] = DFL_ck_realloc_block(
-      TRK[bucket], (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
+  TRK[bucket] = DFL_ck_realloc(TRK[bucket],
+                               (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
 
   TRK[bucket][i].ptr = ptr;
   TRK[bucket][i].file = (char *)file;
@@ -604,16 +563,6 @@
 
 }
 
-static inline void *TRK_ck_realloc_block(void *orig, u32 size, const char *file,
-                                         const char *func, u32 line) {
-
-  void *ret = DFL_ck_realloc_block(orig, size);
-  TRK_free_buf(orig, file, func, line);
-  TRK_alloc_buf(ret, file, func, line);
-  return ret;
-
-}
-
 static inline void *TRK_ck_strdup(u8 *str, const char *file, const char *func,
                                   u32 line) {
 
@@ -641,9 +590,6 @@
     #define ck_realloc(_p1, _p2) \
       TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
 
-    #define ck_realloc_block(_p1, _p2) \
-      TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
-
     #define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
 
     #define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
@@ -657,11 +603,14 @@
 */
 static inline size_t next_pow2(size_t in) {
 
-  if (in == 0 || in > (size_t)-1) {
+  // Commented this out as this behavior doesn't change, according to unittests
+  // if (in == 0 || in > (size_t)-1) {
 
-    return 0;                  /* avoid undefined behaviour under-/overflow */
-
-  }
+  //
+  //   return 0;                  /* avoid undefined behaviour under-/overflow
+  //   */
+  //
+  // }
 
   size_t out = in - 1;
   out |= out >> 1;
@@ -673,6 +622,35 @@
 
 }
 
+/* AFL alloc buffer, the struct is here so we don't need to do fancy ptr
+ * arithmetic */
+struct afl_alloc_buf {
+
+  /* The complete allocated size, including the header of len
+   * AFL_ALLOC_SIZE_OFFSET */
+  size_t complete_size;
+  /* ptr to the first element of the actual buffer */
+  u8 buf[0];
+
+};
+
+#define AFL_ALLOC_SIZE_OFFSET (offsetof(struct afl_alloc_buf, buf))
+
+/* Returns the container element to this ptr */
+static inline struct afl_alloc_buf *afl_alloc_bufptr(void *buf) {
+
+  return (struct afl_alloc_buf *)((u8 *)buf - AFL_ALLOC_SIZE_OFFSET);
+
+}
+
+/* Gets the maximum size of the buf contents (ptr->complete_size -
+ * AFL_ALLOC_SIZE_OFFSET) */
+static inline size_t afl_alloc_bufsize(void *buf) {
+
+  return afl_alloc_bufptr(buf)->complete_size - AFL_ALLOC_SIZE_OFFSET;
+
+}
+
 /* This function makes sure *size is > size_needed after call.
  It will realloc *buf otherwise.
  *size will grow exponentially as per:
@@ -680,71 +658,68 @@
  Will return NULL and free *buf if size_needed is <1 or realloc failed.
  @return For convenience, this function returns *buf.
  */
-static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) {
+static inline void *afl_realloc(void **buf, size_t size_needed) {
+
+  struct afl_alloc_buf *new_buf = NULL;
+
+  size_t current_size = 0;
+  size_t next_size = 0;
+
+  if (likely(*buf)) {
+
+    /* the size is always stored at buf - 1*size_t */
+    new_buf = afl_alloc_bufptr(*buf);
+    current_size = new_buf->complete_size;
+
+  }
+
+  size_needed += AFL_ALLOC_SIZE_OFFSET;
 
   /* No need to realloc */
-  if (likely(size_needed && *size >= size_needed)) { return *buf; }
+  if (likely(current_size >= size_needed)) { return *buf; }
 
   /* No initial size was set */
-  if (size_needed < INITIAL_GROWTH_SIZE) { size_needed = INITIAL_GROWTH_SIZE; }
+  if (size_needed < INITIAL_GROWTH_SIZE) {
 
-  /* grow exponentially */
-  size_t next_size = next_pow2(size_needed);
+    next_size = INITIAL_GROWTH_SIZE;
 
-  /* handle overflow and zero size_needed */
-  if (!next_size) { next_size = size_needed; }
+  } else {
+
+    /* grow exponentially */
+    next_size = next_pow2(size_needed);
+
+    /* handle overflow: fall back to the original size_needed */
+    if (unlikely(!next_size)) { next_size = size_needed; }
+
+  }
 
   /* alloc */
-  *buf = realloc(*buf, next_size);
-  *size = *buf ? next_size : 0;
+  new_buf = realloc(new_buf, next_size);
+  if (unlikely(!new_buf)) {
 
+    *buf = NULL;
+    return NULL;
+
+  }
+
+  new_buf->complete_size = next_size;
+  *buf = (void *)(new_buf->buf);
   return *buf;
 
 }
 
-/* This function makes sure *size is > size_needed after call.
- It will realloc *buf otherwise.
- *size will grow exponentially as per:
- https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/
- Will FATAL if size_needed is <1.
- @return For convenience, this function returns *buf.
- */
-static inline void *ck_maybe_grow(void **buf, size_t *size,
-                                  size_t size_needed) {
+static inline void afl_free(void *buf) {
 
-  /* Oops. found a bug? */
-  if (unlikely(size_needed < 1)) { FATAL("cannot grow to non-positive size"); }
-
-  /* No need to realloc */
-  if (likely(*size >= size_needed)) { return *buf; }
-
-  /* No initial size was set */
-  if (size_needed < INITIAL_GROWTH_SIZE) { size_needed = INITIAL_GROWTH_SIZE; }
-
-  /* grow exponentially */
-  size_t next_size = next_pow2(size_needed);
-
-  /* handle overflow */
-  if (!next_size) { next_size = size_needed; }
-
-  /* alloc */
-  *buf = ck_realloc(*buf, next_size);
-  *size = next_size;
-
-  return *buf;
+  if (buf) { free(afl_alloc_bufptr(buf)); }
 
 }
 
 /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */
-static inline void swap_bufs(void **buf1, size_t *size1, void **buf2,
-                             size_t *size2) {
+static inline void afl_swap_bufs(void **buf1, void **buf2) {
 
-  void * scratch_buf = *buf1;
-  size_t scratch_size = *size1;
+  void *scratch_buf = *buf1;
   *buf1 = *buf2;
-  *size1 = *size2;
   *buf2 = scratch_buf;
-  *size2 = scratch_size;
 
 }
 
diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c
index 17f0298..88262a9 100644
--- a/src/afl-fuzz-extras.c
+++ b/src/afl-fuzz-extras.c
@@ -152,8 +152,10 @@
     /* Okay, let's allocate memory and copy data between "...", handling
        \xNN escaping, \\, and \". */
 
-    afl->extras = ck_realloc_block(
-        afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
+    afl->extras =
+        afl_realloc((void **)&afl->extras,
+                    (afl->extras_cnt + 1) * sizeof(struct extra_data));
+    if (unlikely(!afl->extras)) { PFATAL("alloc"); }
 
     wptr = afl->extras[afl->extras_cnt].data = ck_alloc(rptr - lptr);
 
@@ -296,8 +298,10 @@
     if (min_len > st.st_size) { min_len = st.st_size; }
     if (max_len < st.st_size) { max_len = st.st_size; }
 
-    afl->extras = ck_realloc_block(
-        afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
+    afl->extras =
+        afl_realloc((void **)&afl->extras,
+                    (afl->extras_cnt + 1) * sizeof(struct extra_data));
+    if (unlikely(!afl->extras)) { PFATAL("alloc"); }
 
     afl->extras[afl->extras_cnt].data = ck_alloc(st.st_size);
     afl->extras[afl->extras_cnt].len = st.st_size;
diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c
index 0fa646f..22578df 100644
--- a/src/afl-fuzz-mutators.c
+++ b/src/afl-fuzz-mutators.c
@@ -122,9 +122,8 @@
 
       if (el->post_process_buf) {
 
-        ck_free(el->post_process_buf);
+        afl_free(el->post_process_buf);
         el->post_process_buf = NULL;
-        el->post_process_size = 0;
 
       }
 
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 0a4be32..3bf0c19 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -364,8 +364,6 @@
 
 #endif                                                     /* !IGNORE_FINDS */
 
-#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
-
 /* Take the current entry from the queue, fuzz it for a while. This
    function is a tad too long... returns 0 if fuzzed successfully, 1 if
    skipped or bailed out. */
@@ -384,9 +382,6 @@
   u8  a_collect[MAX_AUTO_EXTRA];
   u32 a_len = 0;
 
-/* Not pretty, but saves a lot of writing */
-#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
-
 #ifdef IGNORE_FINDS
 
   /* In IGNORE_FINDS mode, skip any entries that weren't in the
@@ -484,7 +479,8 @@
      single byte anyway, so it wouldn't give us any performance or memory usage
      benefits. */
 
-  out_buf = ck_maybe_grow(BUF_PARAMS(out), len);
+  out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
+  if (unlikely(!out_buf)) { PFATAL("alloc"); }
 
   afl->subseq_tmouts = 0;
 
@@ -800,7 +796,8 @@
   /* Initialize effector map for the next step (see comments below). Always
      flag first and last byte as doing something. */
 
-  eff_map = ck_maybe_grow(BUF_PARAMS(eff), EFF_ALEN(len));
+  eff_map = afl_realloc(AFL_BUF_PARAM(eff), EFF_ALEN(len));
+  if (unlikely(!eff_map)) { PFATAL("alloc"); }
   eff_map[0] = 1;
 
   if (EFF_APOS(len - 1) != 0) {
@@ -1557,7 +1554,8 @@
 
   orig_hit_cnt = new_hit_cnt;
 
-  ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE);
+  ex_tmp = afl_realloc(AFL_BUF_PARAM(ex), len + MAX_DICT_FILE);
+  if (unlikely(!ex_tmp)) { PFATAL("alloc"); }
 
   for (i = 0; i <= (u32)len; ++i) {
 
@@ -1733,7 +1731,8 @@
         fd = open(target->fname, O_RDONLY);
         if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); }
 
-        new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), target->len);
+        new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len);
+        if (unlikely(!new_buf)) { PFATAL("alloc"); }
         ck_read(fd, new_buf, target->len, target->fname);
         close(fd);
 
@@ -1908,7 +1907,8 @@
               temp_len = new_len;
               if (out_buf != custom_havoc_buf) {
 
-                ck_maybe_grow(BUF_PARAMS(out), temp_len);
+                afl_realloc(AFL_BUF_PARAM(out), temp_len);
+                if (unlikely(!afl->out_buf)) { PFATAL("alloc"); }
                 memcpy(out_buf, custom_havoc_buf, temp_len);
 
               }
@@ -2147,7 +2147,8 @@
             clone_to = rand_below(afl, temp_len);
 
             new_buf =
-                ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len);
+                afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len);
+            if (unlikely(!new_buf)) { PFATAL("alloc"); }
 
             /* Head */
 
@@ -2172,7 +2173,7 @@
             memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
                    temp_len - clone_to);
 
-            swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch));
+            afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch));
             out_buf = new_buf;
             new_buf = NULL;
             temp_len += clone_len;
@@ -2287,7 +2288,8 @@
 
               if (temp_len + extra_len >= MAX_FILE) { break; }
 
-              out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len);
+              out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len + extra_len);
+              if (unlikely(!out_buf)) { PFATAL("alloc"); }
 
               /* Tail */
               memmove(out_buf + insert_at + extra_len, out_buf + insert_at,
@@ -2343,7 +2345,8 @@
             }
 
             u32 new_len = target->len;
-            u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len);
+            u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len);
+            if (unlikely(!new_buf)) { PFATAL("alloc"); }
 
             ck_read(fd, new_buf, new_len, target->fname);
 
@@ -2383,7 +2386,8 @@
               clone_to = rand_below(afl, temp_len);
 
               u8 *temp_buf =
-                  ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len);
+                  afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len);
+              if (unlikely(!temp_buf)) { PFATAL("alloc"); }
 
               /* Head */
 
@@ -2397,7 +2401,7 @@
               memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to,
                      temp_len - clone_to);
 
-              swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch));
+              afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch));
               out_buf = temp_buf;
               temp_len += clone_len;
 
@@ -2418,7 +2422,8 @@
     /* out_buf might have been mangled a bit, so let's restore it to its
        original size and shape. */
 
-    out_buf = ck_maybe_grow(BUF_PARAMS(out), len);
+    out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
+    if (unlikely(!out_buf)) { PFATAL("alloc"); }
     temp_len = len;
     memcpy(out_buf, in_buf, len);
 
@@ -2513,7 +2518,8 @@
 
     if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); }
 
-    new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), target->len);
+    new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
+    if (unlikely(!new_buf)) { PFATAL("alloc"); }
 
     ck_read(fd, new_buf, target->len, target->fname);
 
@@ -2535,10 +2541,11 @@
 
     len = target->len;
     memcpy(new_buf, in_buf, split_at);
-    swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch));
+    afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch));
     in_buf = new_buf;
 
-    out_buf = ck_maybe_grow(BUF_PARAMS(out), len);
+    out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
+    if (unlikely(!out_buf)) { PFATAL("alloc"); }
     memcpy(out_buf, in_buf, len);
 
     goto custom_mutator_stage;
@@ -2679,7 +2686,8 @@
      single byte anyway, so it wouldn't give us any performance or memory usage
      benefits. */
 
-  out_buf = ck_maybe_grow(BUF_PARAMS(out), len);
+  out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
+  if (unlikely(!out_buf)) { PFATAL("alloc"); }
 
   afl->subseq_tmouts = 0;
 
@@ -3001,7 +3009,8 @@
   /* Initialize effector map for the next step (see comments below). Always
          flag first and last byte as doing something. */
 
-  eff_map = ck_maybe_grow(BUF_PARAMS(eff), EFF_ALEN(len));
+  eff_map = afl_realloc(AFL_BUF_PARAM(eff), EFF_ALEN(len));
+  if (unlikely(!eff_map)) { PFATAL("alloc"); }
   eff_map[0] = 1;
 
   if (EFF_APOS(len - 1) != 0) {
@@ -3758,7 +3767,8 @@
 
   orig_hit_cnt = new_hit_cnt;
 
-  ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE);
+  ex_tmp = afl_realloc(AFL_BUF_PARAM(ex), len + MAX_DICT_FILE);
+  if (unlikely(!ex_tmp)) { PFATAL("alloc"); }
 
   for (i = 0; i <= (u32)len; ++i) {
 
@@ -4196,8 +4206,9 @@
 
                 clone_to = rand_below(afl, temp_len);
 
-                new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch),
-                                        temp_len + clone_len);
+                new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch),
+                                      temp_len + clone_len);
+                if (unlikely(!new_buf)) { PFATAL("alloc"); }
 
                 /* Head */
 
@@ -4223,7 +4234,7 @@
                 memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
                        temp_len - clone_to);
 
-                swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch));
+                afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch));
                 out_buf = new_buf;
                 temp_len += clone_len;
                 MOpt_globals.cycles_v2[STAGE_Clone75] += 1;
@@ -4340,7 +4351,8 @@
 
               if (temp_len + extra_len >= MAX_FILE) break;
 
-              out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len);
+              out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len + extra_len);
+              if (unlikely(!out_buf)) { PFATAL("alloc"); }
 
               /* Tail */
               memmove(out_buf + insert_at + extra_len, out_buf + insert_at,
@@ -4373,7 +4385,8 @@
         /* out_buf might have been mangled a bit, so let's restore it to its
            original size and shape. */
 
-        out_buf = ck_maybe_grow(BUF_PARAMS(out), len);
+        out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
+        if (unlikely(!out_buf)) { PFATAL("alloc"); }
         temp_len = len;
         memcpy(out_buf, in_buf, len);
 
@@ -4518,7 +4531,8 @@
 
         if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); }
 
-        new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), target->len);
+        new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
+        if (unlikely(!new_buf)) { PFATAL("alloc"); }
 
         ck_read(fd, new_buf, target->len, target->fname);
 
@@ -4545,9 +4559,10 @@
 
         len = target->len;
         memcpy(new_buf, in_buf, split_at);
-        swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch));
+        afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch));
         in_buf = new_buf;
-        out_buf = ck_maybe_grow(BUF_PARAMS(out), len);
+        out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
+        if (unlikely(!out_buf)) { PFATAL("alloc"); }
         memcpy(out_buf, in_buf, len);
 
         goto havoc_stage_puppet;
@@ -4880,5 +4895,3 @@
 
 }
 
-#undef BUF_PARAMS
-
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c
index a077469..e540f54 100644
--- a/src/afl-fuzz-python.c
+++ b/src/afl-fuzz-python.c
@@ -40,9 +40,7 @@
 
   /* sorry for this makro...
   it just fills in `&py_mutator->something_buf, &py_mutator->something_size`. */
-  #define BUF_PARAMS(name)                              \
-    (void **)&((py_mutator_t *)py_mutator)->name##_buf, \
-        &((py_mutator_t *)py_mutator)->name##_size
+  #define BUF_PARAMS(name) (void **)&((py_mutator_t *)py_mutator)->name##_buf
 
 static size_t fuzz_py(void *py_mutator, u8 *buf, size_t buf_size, u8 **out_buf,
                       u8 *add_buf, size_t add_buf_size, size_t max_size) {
@@ -97,7 +95,8 @@
 
     mutated_size = PyByteArray_Size(py_value);
 
-    *out_buf = ck_maybe_grow(BUF_PARAMS(fuzz), mutated_size);
+    *out_buf = afl_realloc(BUF_PARAMS(fuzz), mutated_size);
+    if (unlikely(!out_buf)) { PFATAL("alloc"); }
 
     memcpy(*out_buf, PyByteArray_AsString(py_value), mutated_size);
     Py_DECREF(py_value);
@@ -317,7 +316,6 @@
 
   mutator = ck_alloc(sizeof(struct custom_mutator));
   mutator->post_process_buf = NULL;
-  mutator->post_process_size = 0;
 
   mutator->name = module_name;
   ACTF("Loading Python mutator library from '%s'...", module_name);
@@ -419,7 +417,11 @@
 
     py_out_buf_size = PyByteArray_Size(py_value);
 
-    ck_maybe_grow(BUF_PARAMS(post_process), py_out_buf_size);
+    if (unlikely(!afl_realloc(BUF_PARAMS(post_process), py_out_buf_size))) {
+
+      PFATAL("alloc");
+
+    }
 
     memcpy(py->post_process_buf, PyByteArray_AsString(py_value),
            py_out_buf_size);
@@ -527,7 +529,8 @@
   if (py_value != NULL) {
 
     ret = PyByteArray_Size(py_value);
-    *out_buf = ck_maybe_grow(BUF_PARAMS(trim), ret);
+    *out_buf = afl_realloc(BUF_PARAMS(trim), ret);
+    if (unlikely(!out_buf)) { PFATAL("alloc"); }
     memcpy(*out_buf, PyByteArray_AsString(py_value), ret);
     Py_DECREF(py_value);
 
@@ -592,7 +595,8 @@
     } else {
 
       /* A new buf is needed... */
-      *out_buf = ck_maybe_grow(BUF_PARAMS(havoc), mutated_size);
+      *out_buf = afl_realloc(BUF_PARAMS(havoc), mutated_size);
+      if (unlikely(!out_buf)) { PFATAL("alloc"); }
 
     }
 
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index f35df91..0c47284 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -26,8 +26,6 @@
 #include <limits.h>
 #include <ctype.h>
 
-#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
-
 /* Mark deterministic checks as done for a particular queue entry. We use the
    .state file to avoid repeating deterministic fuzzing when resuming aborted
    scans. */
@@ -248,8 +246,9 @@
 
   }
 
-  struct queue_entry **queue_buf = ck_maybe_grow(
-      BUF_PARAMS(queue), afl->queued_paths * sizeof(struct queue_entry *));
+  struct queue_entry **queue_buf = afl_realloc(
+      AFL_BUF_PARAM(queue), afl->queued_paths * sizeof(struct queue_entry *));
+  if (unlikely(!queue_buf)) { PFATAL("alloc"); }
   queue_buf[afl->queued_paths - 1] = q;
 
   afl->last_path_time = get_cur_time();
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index f21dd0b..1ae6ab5 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -313,8 +313,6 @@
 
 }
 
-#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
-
 static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
                               u64 pattern, u64 repl, u64 o_pattern, u32 idx,
                               u8 *orig_buf, u8 *buf, u32 len, u8 do_reverse,
@@ -358,7 +356,8 @@
     size_t old_len = endptr - buf_8;
     size_t num_len = snprintf(NULL, 0, "%lld", num);
 
-    u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), len + num_len);
+    u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len);
+    if (unlikely(!new_buf)) { PFATAL("alloc"); }
     memcpy(new_buf, buf, idx);
 
     snprintf(new_buf + idx, num_len, "%lld", num);
@@ -371,7 +370,8 @@
     size_t old_len = endptr - buf_8;
     size_t num_len = snprintf(NULL, 0, "%llu", unum);
 
-    u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), len + num_len);
+    u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len);
+    if (unlikely(!new_buf)) { PFATAL("alloc"); }
     memcpy(new_buf, buf, idx);
 
     snprintf(new_buf + idx, num_len, "%llu", unum);
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index d3f823c..d71ec33 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -135,8 +135,6 @@
 
 }
 
-#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
-
 /* The same, but with an adjustable gap. Used for trimming. */
 
 static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at,
@@ -149,7 +147,8 @@
   This memory is used to carry out the post_processing(if present) after copying
   the testcase by removing the gaps. This can break though
   */
-  u8 *mem_trimmed = ck_maybe_grow(BUF_PARAMS(out_scratch), len - skip_len + 1);
+  u8 *mem_trimmed = afl_realloc(AFL_BUF_PARAM(out_scratch), len - skip_len + 1);
+  if (unlikely(!mem_trimmed)) { PFATAL("alloc"); }
 
   ssize_t new_size = len - skip_len;
   void *  new_mem = mem;
@@ -288,8 +287,6 @@
 
 }
 
-#undef BUF_PARAMS
-
 /* Calibrate a new test case. This is done when processing the input directory
    to warn about flaky or otherwise problematic test cases early on; and when
    new paths are discovered to detect variable behavior and so on. */
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index d4de91a..e68e778 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -421,13 +421,13 @@
   if (afl->pass_stats) { ck_free(afl->pass_stats); }
   if (afl->orig_cmp_map) { ck_free(afl->orig_cmp_map); }
 
-  if (afl->queue_buf) { free(afl->queue_buf); }
-  if (afl->out_buf) { free(afl->out_buf); }
-  if (afl->out_scratch_buf) { free(afl->out_scratch_buf); }
-  if (afl->eff_buf) { free(afl->eff_buf); }
-  if (afl->in_buf) { free(afl->in_buf); }
-  if (afl->in_scratch_buf) { free(afl->in_scratch_buf); }
-  if (afl->ex_buf) { free(afl->ex_buf); }
+  afl_free(afl->queue_buf);
+  afl_free(afl->out_buf);
+  afl_free(afl->out_scratch_buf);
+  afl_free(afl->eff_buf);
+  afl_free(afl->in_buf);
+  afl_free(afl->in_scratch_buf);
+  afl_free(afl->ex_buf);
 
   ck_free(afl->virgin_bits);
   ck_free(afl->virgin_tmout);
diff --git a/test/unittests/unit_maybe_alloc.c b/test/unittests/unit_maybe_alloc.c
index 889ced8..e452e2f 100644
--- a/test/unittests/unit_maybe_alloc.c
+++ b/test/unittests/unit_maybe_alloc.c
@@ -42,7 +42,24 @@
     return 1;
 }
 
-#define BUF_PARAMS (void **)&buf, &size
+#define VOID_BUF (void **)&buf
+
+static void *create_fake_maybe_grow_of(size_t size) {
+
+    size += AFL_ALLOC_SIZE_OFFSET;
+
+    // fake a realloc buf
+
+    struct afl_alloc_buf *buf = malloc(size);
+    if (!buf) {
+        perror("Could not allocate fake buf");
+        return NULL;
+    }
+    buf->complete_size = size; // stored size includes the AFL_ALLOC_SIZE_OFFSET header
+    void *actual_buf = (void *)(buf->buf);
+    return actual_buf;
+
+}
 
 /*
 static int setup(void **state) {
@@ -52,29 +69,55 @@
 }
 */
 
+static void test_pow2(void **state) {
+    (void)state;
+
+    assert_int_equal(next_pow2(64), 64);
+    assert_int_equal(next_pow2(63), 64);
+    assert_int_not_equal(next_pow2(65), 65);
+    assert_int_equal(next_pow2(0x100), 0x100);
+    assert_int_equal(next_pow2(0x180), 0x200);
+    assert_int_equal(next_pow2(108), 0x80);
+    assert_int_equal(next_pow2(0), 0);
+    assert_int_equal(next_pow2(1), 1);
+    assert_int_equal(next_pow2(2), 2);
+    assert_int_equal(next_pow2(3), 4);
+    assert_int_equal(next_pow2(0xFFFFFF), 0x1000000);
+    assert_int_equal(next_pow2(0xFFFFFFF), 0x10000000);
+    assert_int_equal(next_pow2(0xFFFFFF0), 0x10000000);
+    assert_int_equal(next_pow2(SIZE_MAX), 0);
+    assert_int_equal(next_pow2(-1), 0);
+    assert_int_equal(next_pow2(-2), 0);
+
+}
+
 static void test_null_allocs(void **state) {
     (void)state;
 
     void *buf = NULL;
-    size_t size = 0;
-    void *ptr = ck_maybe_grow(BUF_PARAMS, 100);
+    void *ptr = afl_realloc(VOID_BUF, 100);
+    if (unlikely(!ptr)) { PFATAL("alloc"); }
+    size_t size = afl_alloc_bufsize(buf);
     assert_true(buf == ptr);
     assert_true(size >= 100);
-    ck_free(ptr);
+    afl_free(ptr);
 
 }
 
 static void test_nonpow2_size(void **state) {
     (void)state;
 
-    char *buf = ck_alloc(150);
-    size_t size = 150;
+    char *buf = create_fake_maybe_grow_of(150);
+
     buf[140] = '5';
-    char *ptr = ck_maybe_grow(BUF_PARAMS, 160);
+
+    char *ptr = afl_realloc(VOID_BUF, 160);
+    if (unlikely(!ptr)) { PFATAL("alloc"); }
+    size_t size = afl_alloc_bufsize(buf);
     assert_ptr_equal(buf, ptr);
     assert_true(size >= 160);
     assert_true(buf[140] == '5');
-    ck_free(ptr);
+    afl_free(ptr);
 
 }
 
@@ -83,32 +126,37 @@
 
     char *buf = NULL;
     size_t size = 0;
-    assert_non_null(maybe_grow(BUF_PARAMS, 0));
-    free(buf);
+    char *new_buf = afl_realloc(VOID_BUF, 0);
+    assert_non_null(new_buf);
+    assert_ptr_equal(buf, new_buf);
+    afl_free(buf);
     buf = NULL;
     size = 0;
 
-    char *ptr = ck_maybe_grow(BUF_PARAMS, 100);
+    char *ptr = afl_realloc(VOID_BUF, 100);
+    if (unlikely(!ptr)) { PFATAL("alloc"); }
+    size = afl_alloc_bufsize(buf);
     assert_non_null(ptr);
     assert_ptr_equal(buf, ptr);
     assert_true(size >= 100);
 
-    expect_assert_failure(ck_maybe_grow(BUF_PARAMS, 0));
-
-    ck_free(ptr);
+    afl_free(ptr);
 
 }
 
+
 static void test_unchanged_size(void **state) {
     (void)state;
 
-    void *buf = ck_alloc(100);
-    size_t size = 100;
-    void *buf_before = buf;
-    void *buf_after = ck_maybe_grow(BUF_PARAMS, 100);
-    assert_ptr_equal(buf, buf_after);
+    // fake a realloc buf
+    void *actual_buf = create_fake_maybe_grow_of(100);
+
+    void *buf_before = actual_buf;
+    void *buf_after = afl_realloc(&actual_buf, 100);
+    if (unlikely(!buf_after)) { PFATAL("alloc"); }
+    assert_ptr_equal(actual_buf, buf_after);
     assert_ptr_equal(buf_after, buf_before);
-    ck_free(buf);
+    afl_free(buf_after);
 
 }
 
@@ -118,29 +166,35 @@
     char *buf = NULL;
     size_t size = 0;
 
-    char *ptr = ck_maybe_grow(BUF_PARAMS, 100);
+    char *ptr = afl_realloc(VOID_BUF, 100);
+    if (unlikely(!ptr)) { PFATAL("alloc"); }
+    size = afl_alloc_bufsize(ptr);
     assert_ptr_equal(ptr, buf);
     assert_true(size >= 100);
-    assert_int_equal(size, next_pow2(size));
+    assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET);
     buf[50] = '5';
 
-    ptr = (char *)ck_maybe_grow(BUF_PARAMS, 1000);
+    ptr = (char *)afl_realloc(VOID_BUF, 1000);
+    if (unlikely(!ptr)) { PFATAL("alloc"); }
+    size = afl_alloc_bufsize(ptr);
     assert_ptr_equal(ptr, buf);
     assert_true(size >= 100);
-    assert_int_equal(size, next_pow2(size));
+    assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET);
     buf[500] = '5';
 
-    ptr = (char *)ck_maybe_grow(BUF_PARAMS, 10000);
+    ptr = (char *)afl_realloc(VOID_BUF, 10000);
+    if (unlikely(!ptr)) { PFATAL("alloc"); }
+    size = afl_alloc_bufsize(ptr);
     assert_ptr_equal(ptr, buf);
     assert_true(size >= 10000);
-    assert_int_equal(size, next_pow2(size));
+    assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET);
     buf[5000] = '5';
 
     assert_int_equal(buf[50], '5');
     assert_int_equal(buf[500], '5');
     assert_int_equal(buf[5000], '5');
 
-    ck_free(buf);
+    afl_free(buf);
 
 }
 
@@ -157,6 +211,7 @@
     (void)argv;
 
 	const struct CMUnitTest tests[] = {
+		cmocka_unit_test(test_pow2),
 		cmocka_unit_test(test_null_allocs),
 		cmocka_unit_test(test_nonpow2_size),
 		cmocka_unit_test(test_zero_size),