Make *allocx() size class overflow behavior defined.

Limit supported size and alignment to HUGE_MAXCLASS, which in turn is
now limited to be less than PTRDIFF_MAX.

This resolves #278 and #295.
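
For reference, a minimal caller-side sketch of the now-defined behavior
(a hypothetical standalone program, assuming a build whose public names
are unprefixed):

	#include <stdint.h>
	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	int
	main(void)
	{
		size_t toobig = (size_t)PTRDIFF_MAX + 1;
		void *p;

		/* nallocx() reports an unsupported request by returning 0. */
		if (nallocx(toobig, 0) == 0)
			printf("nallocx: %#zx exceeds HUGE_MAXCLASS\n", toobig);

		/* mallocx() fails cleanly for oversized sizes/alignments. */
		if (mallocx(toobig, 0) == NULL)
			printf("mallocx: OOM for size %#zx\n", toobig);
		if (mallocx(1, MALLOCX_ALIGN(toobig)) == NULL)
			printf("mallocx: OOM for alignment %#zx\n", toobig);

		/* xallocx() leaves the allocation at its old size. */
		p = mallocx(16, 0);
		if (xallocx(p, toobig, 0, 0) == sallocx(p, 0))
			printf("xallocx: %#zx request not resized\n", toobig);
		dallocx(p, 0);

		return (0);
	}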
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index d7b3358..bc5dbd1 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -310,16 +310,14 @@
       <para>The <function>mallocx<parameter/></function> function allocates at
       least <parameter>size</parameter> bytes of memory, and returns a pointer
       to the base address of the allocation.  Behavior is undefined if
-      <parameter>size</parameter> is <constant>0</constant>, or if request size
-      overflows due to size class and/or alignment constraints.</para>
+      <parameter>size</parameter> is <constant>0</constant>.</para>
 
       <para>The <function>rallocx<parameter/></function> function resizes the
       allocation at <parameter>ptr</parameter> to be at least
       <parameter>size</parameter> bytes, and returns a pointer to the base
       address of the resulting allocation, which may or may not have moved from
       its original location.  Behavior is undefined if
-      <parameter>size</parameter> is <constant>0</constant>, or if request size
-      overflows due to size class and/or alignment constraints.</para>
+      <parameter>size</parameter> is <constant>0</constant>.</para>
 
       <para>The <function>xallocx<parameter/></function> function resizes the
       allocation at <parameter>ptr</parameter> in place to be at least
@@ -354,10 +352,10 @@
       memory, but it performs the same size computation as the
       <function>mallocx<parameter/></function> function, and returns the real
       size of the allocation that would result from the equivalent
-      <function>mallocx<parameter/></function> function call.  Behavior is
-      undefined if <parameter>size</parameter> is <constant>0</constant>, or if
-      request size overflows due to size class and/or alignment
-      constraints.</para>
+      <function>mallocx<parameter/></function> function call, or
+      <constant>0</constant> if the inputs exceed the maximum supported size
+      class and/or alignment.  Behavior is undefined if
+      <parameter>size</parameter> is <constant>0</constant>.</para>
 
       <para>The <function>mallctl<parameter/></function> function provides a
       general interface for introspecting the memory allocator, as well as
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 470eee6..891b9d7 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -536,8 +536,7 @@
 void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 #endif
 void	arena_quarantine_junk_small(void *ptr, size_t usize);
-void	*arena_malloc_large(tsd_t *tsd, arena_t *arena, size_t size,
-    szind_t ind, bool zero);
+void	*arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
 void	*arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache);
 void	*arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index 68d3789..cb6f69e 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -9,9 +9,9 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void	*huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+void	*huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
     tcache_t *tcache);
-void	*huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+void	*huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     bool zero, tcache_t *tcache);
 bool	huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
     size_t usize_min, size_t usize_max, bool zero);
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 611ed36..3f54391 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -642,7 +642,7 @@
 index2size(szind_t index)
 {
 
-	assert(index <= NSIZES);
+	assert(index < NSIZES);
 	return (index2size_lookup(index));
 }
 
@@ -745,17 +745,16 @@
 			return (usize);
 	}
 
-	/* Huge size class.  Beware of size_t overflow. */
+	/* Huge size class.  Beware of overflow. */
+
+	if (unlikely(alignment > HUGE_MAXCLASS))
+		return (0);
 
 	/*
 	 * We can't achieve subchunk alignment, so round up alignment to the
 	 * minimum that can actually be supported.
 	 */
 	alignment = CHUNK_CEILING(alignment);
-	if (alignment == 0) {
-		/* size_t overflow. */
-		return (0);
-	}
 
 	/* Make sure result is a huge size class. */
 	if (size <= chunksize)
@@ -1106,7 +1105,7 @@
 	size_t usize, copysize;
 
 	usize = sa2u(size + extra, alignment);
-	if (usize == 0)
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
 		return (NULL);
 	p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
 	if (p == NULL) {
@@ -1114,7 +1113,7 @@
 			return (NULL);
 		/* Try again, without extra this time. */
 		usize = sa2u(size, alignment);
-		if (usize == 0)
+		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
 			return (NULL);
 		p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
 		if (p == NULL)
diff --git a/include/jemalloc/internal/size_classes.sh b/include/jemalloc/internal/size_classes.sh
index fc82036..2b0ca29 100755
--- a/include/jemalloc/internal/size_classes.sh
+++ b/include/jemalloc/internal/size_classes.sh
@@ -142,10 +142,10 @@
 
   # All remaining groups.
   lg_grp=$((${lg_grp} + ${lg_g}))
-  while [ ${lg_grp} -lt ${ptr_bits} ] ; do
+  while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
     sep_line
     ndelta=1
-    if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then
+    if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
       ndelta_limit=$((${g} - 1))
     else
       ndelta_limit=${g}
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 25eaf14..8357820 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -344,7 +344,6 @@
 	void *ret;
 	tcache_bin_t *tbin;
 	bool tcache_success;
-	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
 
 	assert(binind < nhbins);
 	tbin = &tcache->tbins[binind];
@@ -359,14 +358,15 @@
 		if (unlikely(arena == NULL))
 			return (NULL);
 
-		usize = index2size(binind);
-		assert(usize <= tcache_maxclass);
-		ret = arena_malloc_large(tsd, arena, usize, binind, zero);
+		ret = arena_malloc_large(tsd, arena, binind, zero);
 		if (ret == NULL)
 			return (NULL);
 	} else {
+		size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
 		/* Only compute usize on demand */
-		if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
+		if (config_prof || (slow_path && config_fill) ||
+		    unlikely(zero)) {
 			usize = index2size(binind);
 			assert(usize <= tcache_maxclass);
 		}
diff --git a/include/jemalloc/jemalloc_macros.h.in b/include/jemalloc/jemalloc_macros.h.in
index d164eda..9f356f9 100644
--- a/include/jemalloc/jemalloc_macros.h.in
+++ b/include/jemalloc/jemalloc_macros.h.in
@@ -16,7 +16,8 @@
 #    define MALLOCX_ALIGN(a)	((int)(ffs(a)-1))
 #  else
 #    define MALLOCX_ALIGN(a)						\
-	 ((int)((a < (size_t)INT_MAX) ? ffs((int)a)-1 : ffs((int)(a>>32))+31))
+       ((int)(((a) < (size_t)INT_MAX) ? ffs((int)(a))-1 :		\
+       ffs((int)((a)>>32))+31))
 #  endif
 #  define MALLOCX_ZERO	((int)0x40)
 /*
diff --git a/src/arena.c b/src/arena.c
index 3f39468..1ceb59f 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2364,16 +2364,16 @@
 }
 
 static void *
-arena_malloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
-    bool zero)
+arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
 {
 	void *ret;
 	arena_bin_t *bin;
+	size_t usize;
 	arena_run_t *run;
 
 	assert(binind < NBINS);
 	bin = &arena->bins[binind];
-	size = index2size(binind);
+	usize = index2size(binind);
 
 	malloc_mutex_lock(&bin->lock);
 	if ((run = bin->runcur) != NULL && run->nfree > 0)
@@ -2392,7 +2392,7 @@
 		bin->stats.curregs++;
 	}
 	malloc_mutex_unlock(&bin->lock);
-	if (config_prof && !isthreaded && arena_prof_accum(arena, size))
+	if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
 		prof_idump();
 
 	if (!zero) {
@@ -2401,16 +2401,16 @@
 				arena_alloc_junk_small(ret,
 				    &arena_bin_info[binind], false);
 			} else if (unlikely(opt_zero))
-				memset(ret, 0, size);
+				memset(ret, 0, usize);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
 	} else {
 		if (config_fill && unlikely(opt_junk_alloc)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-		memset(ret, 0, size);
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+		memset(ret, 0, usize);
 	}
 
 	arena_decay_tick(tsd, arena);
@@ -2418,8 +2418,7 @@
 }
 
 void *
-arena_malloc_large(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
-    bool zero)
+arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
 {
 	void *ret;
 	size_t usize;
@@ -2490,10 +2489,10 @@
 		return (NULL);
 
 	if (likely(size <= SMALL_MAXCLASS))
-		return (arena_malloc_small(tsd, arena, size, ind, zero));
+		return (arena_malloc_small(tsd, arena, ind, zero));
 	if (likely(size <= large_maxclass))
-		return (arena_malloc_large(tsd, arena, size, ind, zero));
-	return (huge_malloc(tsd, arena, size, zero, tcache));
+		return (arena_malloc_large(tsd, arena, ind, zero));
+	return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
 }
 
 /* Only handles large allocations that require more than page alignment. */
@@ -3047,6 +3046,13 @@
 {
 	size_t usize_min, usize_max;
 
+	/* Calls with non-zero extra had to clamp extra. */
+	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
+
+	/* Prevent exceeding PTRDIFF_MAX. */
+	if (unlikely(size > HUGE_MAXCLASS))
+		return (true);
+
 	usize_min = s2u(size);
 	usize_max = s2u(size + extra);
 	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
@@ -3089,7 +3095,7 @@
 		return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
 		    tcache, true));
 	usize = sa2u(usize, alignment);
-	if (usize == 0)
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
 		return (NULL);
 	return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
 }
@@ -3102,7 +3108,7 @@
 	size_t usize;
 
 	usize = s2u(size);
-	if (usize == 0)
+	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
 		return (NULL);
 
 	if (likely(usize <= large_maxclass)) {
diff --git a/src/ckh.c b/src/ckh.c
index d1cfd23..3b423aa 100644
--- a/src/ckh.c
+++ b/src/ckh.c
@@ -266,7 +266,7 @@
 
 		lg_curcells++;
 		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-		if (usize == 0) {
+		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
 			ret = true;
 			goto label_return;
 		}
@@ -312,7 +312,7 @@
 	lg_prevbuckets = ckh->lg_curbuckets;
 	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
 	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-	if (usize == 0)
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
 		return;
 	tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
 	    NULL);
@@ -387,7 +387,7 @@
 	ckh->keycomp = keycomp;
 
 	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-	if (usize == 0) {
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
 		ret = true;
 		goto label_return;
 	}
diff --git a/src/huge.c b/src/huge.c
index 9f88048..5f7ceaf 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -31,35 +31,30 @@
 }
 
 void *
-huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
     tcache_t *tcache)
 {
-	size_t usize;
 
-	usize = s2u(size);
-	if (usize == 0) {
-		/* size_t overflow. */
-		return (NULL);
-	}
+	assert(usize == s2u(usize));
 
 	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
 }
 
 void *
-huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     bool zero, tcache_t *tcache)
 {
 	void *ret;
-	size_t usize;
+	size_t ausize;
 	extent_node_t *node;
 	bool is_zeroed;
 
 	/* Allocate one or more contiguous chunks for this request. */
 
-	usize = sa2u(size, alignment);
-	if (unlikely(usize == 0))
+	ausize = sa2u(usize, alignment);
+	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
 		return (NULL);
-	assert(usize >= chunksize);
+	assert(ausize >= chunksize);
 
 	/* Allocate an extent node with which to track the chunk. */
 	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
@@ -74,15 +69,15 @@
 	is_zeroed = zero;
 	arena = arena_choose(tsd, arena);
 	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
-	    size, alignment, &is_zeroed)) == NULL) {
+	    usize, alignment, &is_zeroed)) == NULL) {
 		idalloctm(tsd, node, tcache, true, true);
 		return (NULL);
 	}
 
-	extent_node_init(node, arena, ret, size, is_zeroed, true);
+	extent_node_init(node, arena, ret, usize, is_zeroed, true);
 
 	if (huge_node_set(ret, node)) {
-		arena_chunk_dalloc_huge(arena, ret, size);
+		arena_chunk_dalloc_huge(arena, ret, usize);
 		idalloctm(tsd, node, tcache, true, true);
 		return (NULL);
 	}
@@ -95,9 +90,9 @@
 
 	if (zero || (config_fill && unlikely(opt_zero))) {
 		if (!is_zeroed)
-			memset(ret, 0, size);
+			memset(ret, 0, usize);
 	} else if (config_fill && unlikely(opt_junk_alloc))
-		memset(ret, 0xa5, size);
+		memset(ret, 0xa5, usize);
 
 	arena_decay_tick(tsd, arena);
 	return (ret);
@@ -286,6 +281,8 @@
 {
 
 	assert(s2u(oldsize) == oldsize);
+	/* The following should have been caught by callers. */
+	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
 
 	/* Both allocations must be huge to avoid a move. */
 	if (oldsize < chunksize || usize_max < chunksize)
@@ -346,6 +343,9 @@
 	void *ret;
 	size_t copysize;
 
+	/* The following should have been caught by callers. */
+	assert(usize > 0 && usize <= HUGE_MAXCLASS);
+
 	/* Try to avoid moving the allocation. */
 	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
 		return (ptr);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 86032a4..d9197e0 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1449,18 +1449,17 @@
 		return (NULL);
 	*tsd = tsd_fetch();
 	ind = size2index(size);
+	if (unlikely(ind >= NSIZES))
+		return (NULL);
 
-	if (config_stats ||
-	    (config_prof && opt_prof) ||
-	    (slow_path && config_valgrind && unlikely(in_valgrind))) {
+	if (config_stats || (config_prof && opt_prof) || (slow_path &&
+	    config_valgrind && unlikely(in_valgrind))) {
 		*usize = index2size(ind);
+		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
 	}
 
-	if (config_prof && opt_prof) {
-		if (unlikely(*usize == 0))
-			return (NULL);
+	if (config_prof && opt_prof)
 		return (imalloc_prof(*tsd, *usize, ind, slow_path));
-	}
 
 	return (imalloc(*tsd, size, ind, slow_path));
 }
@@ -1584,7 +1583,7 @@
 	}
 
 	usize = sa2u(size, alignment);
-	if (unlikely(usize == 0)) {
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
 		result = NULL;
 		goto label_oom;
 	}
@@ -1722,12 +1721,12 @@
 	}
 
 	ind = size2index(num_size);
+	if (unlikely(ind >= NSIZES)) {
+		ret = NULL;
+		goto label_return;
+	}
 	if (config_prof && opt_prof) {
 		usize = index2size(ind);
-		if (unlikely(usize == 0)) {
-			ret = NULL;
-			goto label_return;
-		}
 		ret = icalloc_prof(tsd, usize, ind);
 	} else {
 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
@@ -1874,8 +1873,8 @@
 
 		if (config_prof && opt_prof) {
 			usize = s2u(size);
-			ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
-			    ptr, old_usize, usize);
+			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
+			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
 		} else {
 			if (config_stats || (config_valgrind &&
 			    unlikely(in_valgrind)))
@@ -2006,7 +2005,8 @@
 		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
 		*usize = sa2u(size, *alignment);
 	}
-	assert(*usize != 0);
+	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
+		return (true);
 	*zero = MALLOCX_ZERO_GET(flags);
 	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
@@ -2032,7 +2032,6 @@
 
 	if (likely(flags == 0)) {
 		*usize = s2u(size);
-		assert(*usize != 0);
 		*alignment = 0;
 		*zero = false;
 		*tcache = tcache_get(tsd, true);
@@ -2051,6 +2050,8 @@
 	szind_t ind;
 
 	ind = size2index(usize);
+	if (unlikely(ind >= NSIZES))
+		return (NULL);
 	if (unlikely(alignment != 0))
 		return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
 	if (unlikely(zero))
@@ -2120,8 +2121,13 @@
 
 	if (likely(flags == 0)) {
 		szind_t ind = size2index(size);
-		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+		if (unlikely(ind >= NSIZES))
+			return (NULL);
+		if (config_stats || (config_valgrind &&
+		    unlikely(in_valgrind))) {
 			*usize = index2size(ind);
+			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+		}
 		return (imalloc(tsd, size, ind, true));
 	}
 
@@ -2278,7 +2284,8 @@
 
 	if (config_prof && opt_prof) {
 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
-		assert(usize != 0);
+		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+			goto label_oom;
 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
 		    zero, tcache, arena);
 		if (unlikely(p == NULL))
@@ -2392,14 +2399,23 @@
 
 	old_usize = isalloc(ptr, config_prof);
 
-	/* Clamp extra if necessary to avoid (size + extra) overflow. */
-	if (unlikely(size + extra > HUGE_MAXCLASS)) {
-		/* Check for size overflow. */
+	if (unlikely(extra > 0)) {
+		/*
+		 * The API explicitly absolves itself of protecting against
+		 * (size + extra) numerical overflow, but we may need to clamp
+		 * extra to avoid exceeding HUGE_MAXCLASS.
+		 *
+		 * Ordinarily, size limit checking is handled deeper down, but
+		 * here we have to check as part of (size + extra) clamping,
+		 * since we need the clamped value in the above helper
+		 * functions.
+		 */
 		if (unlikely(size > HUGE_MAXCLASS)) {
 			usize = old_usize;
 			goto label_not_resized;
 		}
-		extra = HUGE_MAXCLASS - size;
+		if (unlikely(HUGE_MAXCLASS - size < extra))
+			extra = HUGE_MAXCLASS - size;
 	}
 
 	if (config_valgrind && unlikely(in_valgrind))
@@ -2474,7 +2490,6 @@
 		usize = s2u(size);
 	else
 		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
-	assert(usize != 0);
 	return (usize);
 }
 
@@ -2507,13 +2522,18 @@
 JEMALLOC_ATTR(pure)
 je_nallocx(size_t size, int flags)
 {
+	size_t usize;
 
 	assert(size != 0);
 
 	if (unlikely(malloc_init()))
 		return (0);
 
-	return (inallocx(size, flags));
+	usize = inallocx(size, flags);
+	if (unlikely(usize > HUGE_MAXCLASS))
+		return (0);
+
+	return (usize);
 }
 
 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c
index 6253175..35c559a 100644
--- a/test/integration/mallocx.c
+++ b/test/integration/mallocx.c
@@ -46,6 +46,35 @@
 	return (get_size_impl("arenas.hchunk.0.size", ind));
 }
 
+TEST_BEGIN(test_overflow)
+{
+	size_t hugemax, size;
+
+	hugemax = get_huge_size(get_nhuge()-1);
+
+	assert_ptr_null(mallocx(hugemax+1, 0),
+	    "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1);
+
+	assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
+	    "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+	assert_ptr_null(mallocx(SIZE_T_MAX, 0),
+	    "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+#if LG_SIZEOF_PTR == 3
+	size = ZU(0x600000000000000);
+#else
+	size = ZU(0x6000000);
+#endif
+	assert_ptr_null(mallocx(size, 0),
+	    "Expected OOM for mallocx(size=%#zx, 0", size);
+
+	assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+	    "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
+	    ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
 TEST_BEGIN(test_oom)
 {
 	size_t hugemax, size, alignment;
@@ -176,6 +205,7 @@
 {
 
 	return (test(
+	    test_overflow,
 	    test_oom,
 	    test_basic,
 	    test_alignment_and_size));
diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c
index 022e0bf..3b7d21c 100644
--- a/test/integration/rallocx.c
+++ b/test/integration/rallocx.c
@@ -1,5 +1,51 @@
 #include "test/jemalloc_test.h"
 
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+	unsigned ret;
+	size_t z;
+
+	z = sizeof(unsigned);
+	assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+	return (ret);
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+	return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+	size_t ret;
+	size_t z;
+	size_t mib[4];
+	size_t miblen = 4;
+
+	z = sizeof(size_t);
+	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+	mib[2] = ind;
+	z = sizeof(size_t);
+	assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+	return (ret);
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+	return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
 TEST_BEGIN(test_grow_and_shrink)
 {
 	void *p, *q;
@@ -173,6 +219,41 @@
 }
 TEST_END
 
+TEST_BEGIN(test_overflow)
+{
+	size_t hugemax, size;
+	void *p;
+
+	hugemax = get_huge_size(get_nhuge()-1);
+
+	p = mallocx(1, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+	assert_ptr_null(rallocx(p, hugemax+1, 0),
+	    "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1);
+
+	assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
+	    "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+	assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
+	    "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
+
+#if LG_SIZEOF_PTR == 3
+	size = ZU(0x600000000000000);
+#else
+	size = ZU(0x6000000);
+#endif
+	assert_ptr_null(rallocx(p, size, 0),
+	    "Expected OOM for rallocx(p, size=%#zx, 0", size);
+
+	assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+	    "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
+	    ZU(PTRDIFF_MAX)+1);
+
+	dallocx(p, 0);
+}
+TEST_END
+
 int
 main(void)
 {
@@ -181,5 +262,6 @@
 	    test_grow_and_shrink,
 	    test_zero,
 	    test_align,
-	    test_lg_align_and_zero));
+	    test_lg_align_and_zero,
+	    test_overflow));
 }
diff --git a/test/unit/size_classes.c b/test/unit/size_classes.c
index d3aaebd..3a2126f 100644
--- a/test/unit/size_classes.c
+++ b/test/unit/size_classes.c
@@ -80,10 +80,33 @@
 }
 TEST_END
 
+TEST_BEGIN(test_overflow)
+{
+	size_t max_size_class;
+
+	max_size_class = get_max_size_class();
+
+	assert_u_ge(size2index(max_size_class+1), NSIZES,
+	    "size2index() should return >= NSIZES on overflow");
+	assert_u_ge(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
+	    "size2index() should return >= NSIZES on overflow");
+	assert_u_ge(size2index(SIZE_T_MAX), NSIZES,
+	    "size2index() should return >= NSIZES on overflow");
+
+	assert_zu_gt(s2u(max_size_class+1), HUGE_MAXCLASS,
+	    "s2u() should return > HUGE_MAXCLASS for unsupported size");
+	assert_zu_gt(s2u(ZU(PTRDIFF_MAX)+1), HUGE_MAXCLASS,
+	    "s2u() should return > HUGE_MAXCLASS for unsupported size");
+	assert_zu_eq(s2u(SIZE_T_MAX), 0,
+	    "s2u() should return 0 on overflow");
+}
+TEST_END
+
 int
 main(void)
 {
 
 	return (test(
-	    test_size_classes));
+	    test_size_classes,
+	    test_overflow));
 }