RESTRICT AUTOMERGE: Cherry-pick "begin cleanup of malloc porting layer"
Bug: 78354855
Test: Not feasible
Original description:
========================================================================
1. Merge some of the allocators into sk_malloc_flags by redefining a flag to mean zero-init
2. Add more private helpers to simplify our call-sites (and handle some overflow mul checks)
3. The 2-param helpers rely on the saturating SkSafeMath::Mul to pass max_size_t as the request,
which should always fail.
chromium: 508641
Reviewed-on: https://skia-review.googlesource.com/90940
Commit-Queue: Mike Reed <reed@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Reviewed-by: Stephan Altmueller <stephana@google.com>
========================================================================
Conflicts:
- include/private/SkMalloc.h
Simply removed the old definitions of SK_MALLOC_TEMP and SK_MALLOC_THROW.
- public.bzl
Copied SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER into the old defines.
- src/codec/SkIcoCodec.cpp
Drop a change where we were not using malloc yet.
- src/codec/SkBmpBaseCodec.cpp
- src/core/SkBitmapCache.cpp
These files weren't yet using malloc (and SkBmpBaseCodec hadn't been
factored out).
- src/core/SkMallocPixelRef.cpp
These were still using New rather than Make (return raw pointer). Leave
them unchanged, as sk_malloc_flags is still valid.
- src/lazy/SkDiscardableMemoryPool.cpp
Leave this unchanged; sk_malloc_flags is still valid
In addition, pull in SkSafeMath.h, which was originally introduced in
https://skia-review.googlesource.com/c/skia/+/33721. This is required
for the new sk_malloc calls.
Also pull in SkSafeMath::Add and SkSafeMath::Mul, introduced in
https://skia-review.googlesource.com/88581
Also add SK_MaxSizeT, which the above depends on, introduced in
https://skia-review.googlesource.com/57084
Also, modify NewFromStream to use sk_malloc_canfail, matching pi and
avoiding a build break
Change-Id: Ib320484673a865460fc1efb900f611209e088edb
(cherry picked from commit a12cc3e14ea6734c7efe76aa6a19239909830b28)
diff --git a/include/core/SkTypes.h b/include/core/SkTypes.h
index 1dd672b..a3e295e 100644
--- a/include/core/SkTypes.h
+++ b/include/core/SkTypes.h
@@ -235,6 +235,7 @@
#define SK_MaxU32 0xFFFFFFFF
#define SK_MinU32 0
#define SK_NaN32 ((int) (1U << 31))
+#define SK_MaxSizeT SIZE_MAX
/** Returns true if the value can be represented with signed 16bits
*/
diff --git a/include/private/SkMalloc.h b/include/private/SkMalloc.h
index 58e11f6..5c4f9b6 100644
--- a/include/private/SkMalloc.h
+++ b/include/private/SkMalloc.h
@@ -17,27 +17,53 @@
memory wrappers to be implemented by the porting layer (platform)
*/
-enum {
- SK_MALLOC_TEMP = 0x01, //!< hint to sk_malloc that the requested memory will be freed in the scope of the stack frame
- SK_MALLOC_THROW = 0x02 //!< instructs sk_malloc to call sk_throw if the memory cannot be allocated.
-};
-/** Return a block of memory (at least 4-byte aligned) of at least the
- specified size. If the requested memory cannot be returned, either
- return null (if SK_MALLOC_TEMP bit is clear) or throw an exception
- (if SK_MALLOC_TEMP bit is set). To free the memory, call sk_free().
-*/
-SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
-/** Same as sk_malloc(), but hard coded to pass SK_MALLOC_THROW as the flag
-*/
-SK_API extern void* sk_malloc_throw(size_t size);
-/** Same as standard realloc(), but this one never returns null on failure. It will throw
- an exception if it fails.
-*/
-SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
-/** Free memory returned by sk_malloc(). It is safe to pass null.
-*/
+
+/** Free memory returned by sk_malloc(). It is safe to pass null. */
SK_API extern void sk_free(void*);
+/**
+ * Called internally if we run out of memory. The platform implementation must
+ * not return, but should either throw an exception or otherwise exit.
+ */
+SK_API extern void sk_out_of_memory(void);
+
+enum {
+#ifdef SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER
+ SK_MALLOC_TEMP = 1,
+#else
+ /**
+ * If this bit is set, the returned buffer must be zero-initialized. If this bit is not set
+ * the buffer can be uninitialized.
+ */
+ SK_MALLOC_ZERO_INITIALIZE = 1 << 0,
+#endif
+
+ /**
+ * If this bit is set, the implementation must throw/crash/quit if the request cannot
+ * be fulfilled. If this bit is not set, then it should return nullptr on failure.
+ */
+ SK_MALLOC_THROW = 1 << 1,
+};
+/**
+ * Return a block of memory (at least 4-byte aligned) of at least the specified size.
+ * If the requested memory cannot be returned, either return nullptr or throw/exit, depending
+ * on the SK_MALLOC_THROW bit. If the allocation succeeds, the memory will be zero-initialized
+ * if the SK_MALLOC_ZERO_INITIALIZE bit was set.
+ *
+ * To free the memory, call sk_free()
+ */
+SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
+
+/** Same as standard realloc(), but this one never returns null on failure. It will throw
+ * an exception if it fails.
+ */
+SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
+
+#ifdef SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER
+
+/** Same as sk_malloc_flags(), but hard coded to pass SK_MALLOC_THROW as the flag */
+SK_API extern void* sk_malloc_throw(size_t size);
+
/** Much like calloc: returns a pointer to at least size zero bytes, or NULL on failure.
*/
SK_API extern void* sk_calloc(size_t size);
@@ -46,10 +72,36 @@
*/
SK_API extern void* sk_calloc_throw(size_t size);
-/** Called internally if we run out of memory. The platform implementation must
- not return, but should either throw an exception or otherwise exit.
-*/
-SK_API extern void sk_out_of_memory(void);
+#else
+static inline void* sk_malloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW);
+}
+
+static inline void* sk_calloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_ZERO_INITIALIZE);
+}
+#endif
+
+static inline void* sk_calloc_canfail(size_t size) {
+#ifdef SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER
+ return sk_calloc(size);
+#else
+ return sk_malloc_flags(size, SK_MALLOC_ZERO_INITIALIZE);
+#endif
+}
+
+// Performs a safe multiply count * elemSize, checking for overflow
+SK_API extern void* sk_calloc_throw(size_t count, size_t elemSize);
+SK_API extern void* sk_malloc_throw(size_t count, size_t elemSize);
+SK_API extern void* sk_realloc_throw(void* buffer, size_t count, size_t elemSize);
+
+/**
+ * These variants return nullptr on failure
+ */
+static inline void* sk_malloc_canfail(size_t size) {
+ return sk_malloc_flags(size, 0);
+}
+SK_API extern void* sk_malloc_canfail(size_t count, size_t elemSize);
// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
static inline void sk_bzero(void* buffer, size_t size) {
diff --git a/include/private/SkTArray.h b/include/private/SkTArray.h
index a6e4ded..22dded3 100644
--- a/include/private/SkTArray.h
+++ b/include/private/SkTArray.h
@@ -445,7 +445,7 @@
fReserved = false;
} else {
fAllocCount = SkTMax(count, SkTMax(kMinHeapAllocCount, reserveCount));
- fMemArray = sk_malloc_throw(fAllocCount * sizeof(T));
+ fMemArray = sk_malloc_throw(fAllocCount, sizeof(T));
fOwnMemory = true;
fReserved = reserveCount > 0;
}
@@ -460,7 +460,7 @@
fReserved = false;
if (count > preallocCount) {
fAllocCount = SkTMax(count, kMinHeapAllocCount);
- fMemArray = sk_malloc_throw(fAllocCount * sizeof(T));
+ fMemArray = sk_malloc_throw(fAllocCount, sizeof(T));
fOwnMemory = true;
} else {
fAllocCount = preallocCount;
@@ -537,7 +537,7 @@
return;
}
fAllocCount = newAllocCount;
- void* newMemArray = sk_malloc_throw(fAllocCount * sizeof(T));
+ void* newMemArray = sk_malloc_throw(fAllocCount, sizeof(T));
this->move(newMemArray);
if (fOwnMemory) {
sk_free(fMemArray);
diff --git a/include/private/SkTemplates.h b/include/private/SkTemplates.h
index 919d160..b4718fc 100644
--- a/include/private/SkTemplates.h
+++ b/include/private/SkTemplates.h
@@ -175,12 +175,7 @@
}
if (count > kCount) {
- const uint64_t size64 = sk_64_mul(count, sizeof(T));
- const size_t size = static_cast<size_t>(size64);
- if (size != size64) {
- sk_out_of_memory();
- }
- fArray = (T*) sk_malloc_throw(size);
+ fArray = (T*) sk_malloc_throw(count, sizeof(T));
} else if (count > 0) {
fArray = (T*) fStorage;
} else {
@@ -250,7 +245,7 @@
/** Allocates space for 'count' Ts. */
explicit SkAutoTMalloc(size_t count) {
- fPtr = count ? (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW) : nullptr;
+ fPtr = count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr;
}
SkAutoTMalloc(SkAutoTMalloc<T>&& that) : fPtr(that.release()) {}
@@ -271,7 +266,7 @@
/** Resize the memory area pointed to by the current ptr without preserving contents. */
T* reset(size_t count = 0) {
sk_free(fPtr);
- fPtr = count ? (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW) : nullptr;
+ fPtr = count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr;
return fPtr;
}
@@ -322,7 +317,7 @@
SkAutoSTMalloc(size_t count) {
if (count > kCount) {
- fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW | SK_MALLOC_TEMP);
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
} else if (count) {
fPtr = fTStorage;
} else {
@@ -342,7 +337,7 @@
sk_free(fPtr);
}
if (count > kCount) {
- fPtr = (T*)sk_malloc_throw(count * sizeof(T));
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
} else if (count) {
fPtr = fTStorage;
} else {
@@ -373,14 +368,14 @@
void realloc(size_t count) {
if (count > kCount) {
if (fPtr == fTStorage) {
- fPtr = (T*)sk_malloc_throw(count * sizeof(T));
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
memcpy(fPtr, fTStorage, kCount * sizeof(T));
} else {
- fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T));
+ fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T));
}
} else if (count) {
if (fPtr != fTStorage) {
- fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T));
+ fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T));
}
} else {
this->reset(0);
diff --git a/public.bzl b/public.bzl
index 51c8069..9f12265 100644
--- a/public.bzl
+++ b/public.bzl
@@ -672,6 +672,7 @@
"SK_NO_ANALYTIC_AA",
# Experiment to diagnose image diffs in Google3
"SK_DISABLE_SSSE3_RUNTIME_CHECK_FOR_LOWP_STAGES",
+ "SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER",
]
################################################################################
diff --git a/src/codec/SkIcoCodec.cpp b/src/codec/SkIcoCodec.cpp
index 2e61bfe..b793a6d 100644
--- a/src/codec/SkIcoCodec.cpp
+++ b/src/codec/SkIcoCodec.cpp
@@ -58,8 +58,7 @@
uint32_t offset;
uint32_t size;
};
- SkAutoFree dirEntryBuffer(sk_malloc_flags(sizeof(Entry) * numImages,
- SK_MALLOC_TEMP));
+ SkAutoFree dirEntryBuffer(sk_malloc_canfail(sizeof(Entry) * numImages));
if (!dirEntryBuffer) {
SkCodecPrintf("Error: OOM allocating ICO directory for %i images.\n",
numImages);
@@ -135,7 +134,7 @@
bytesRead = offset;
// Create a new stream for the embedded codec
- SkAutoFree buffer(sk_malloc_flags(size, 0));
+ SkAutoFree buffer(sk_malloc_canfail(size));
if (!buffer) {
SkCodecPrintf("Warning: OOM trying to create embedded stream.\n");
break;
diff --git a/src/core/SkAutoMalloc.h b/src/core/SkAutoMalloc.h
index 8672cd8..1a1a7d2 100644
--- a/src/core/SkAutoMalloc.h
+++ b/src/core/SkAutoMalloc.h
@@ -144,7 +144,7 @@
SkASSERT(fPtr != fStorage); // otherwise we lied when setting didChangeAlloc.
fPtr = fStorage;
} else {
- fPtr = sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_TEMP);
+ fPtr = sk_malloc_throw(size);
}
fSize = size;
diff --git a/src/core/SkAutoPixmapStorage.cpp b/src/core/SkAutoPixmapStorage.cpp
index b41bc90..03f3f03 100644
--- a/src/core/SkAutoPixmapStorage.cpp
+++ b/src/core/SkAutoPixmapStorage.cpp
@@ -40,7 +40,7 @@
if (0 == size) {
return false;
}
- void* pixels = sk_malloc_flags(size, 0);
+ void* pixels = sk_malloc_canfail(size);
if (nullptr == pixels) {
return false;
}
diff --git a/src/core/SkMallocPixelRef.cpp b/src/core/SkMallocPixelRef.cpp
index 6928f38..28d92d2 100644
--- a/src/core/SkMallocPixelRef.cpp
+++ b/src/core/SkMallocPixelRef.cpp
@@ -8,8 +8,27 @@
#include "SkMallocPixelRef.h"
#include "SkBitmap.h"
#include "SkReadBuffer.h"
+#include "SkSafeMath.h"
#include "SkWriteBuffer.h"
+void* sk_calloc_throw(size_t count, size_t elemSize) {
+ return sk_calloc_throw(SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_malloc_throw(size_t count, size_t elemSize) {
+ return sk_malloc_throw(SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_realloc_throw(void* buffer, size_t count, size_t elemSize) {
+ return sk_realloc_throw(buffer, SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_malloc_canfail(size_t count, size_t elemSize) {
+ return sk_malloc_canfail(SkSafeMath::Mul(count, elemSize));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
// assumes ptr was allocated via sk_malloc
static void sk_free_releaseproc(void* ptr, void*) {
sk_free(ptr);
@@ -82,7 +101,7 @@
sk_sp<SkPixelRef> SkMallocPixelRef::MakeZeroed(const SkImageInfo& info,
size_t rowBytes) {
- return MakeUsing(sk_calloc, info, rowBytes);
+ return MakeUsing(sk_calloc_canfail, info, rowBytes);
}
static void sk_data_releaseproc(void*, void* dataPtr) {
diff --git a/src/core/SkMath.cpp b/src/core/SkMath.cpp
index 6eff790..58645da 100644
--- a/src/core/SkMath.cpp
+++ b/src/core/SkMath.cpp
@@ -9,6 +9,7 @@
#include "SkFixed.h"
#include "SkFloatBits.h"
#include "SkFloatingPoint.h"
+#include "SkSafeMath.h"
#include "SkScalar.h"
#define sub_shift(zeros, x, n) \
@@ -67,8 +68,6 @@
return root;
}
-///////////////////////////////////////////////////////////////////////////////
-
float SkScalarSinCos(float radians, float* cosValue) {
float sinValue = sk_float_sin(radians);
@@ -84,3 +83,17 @@
}
return sinValue;
}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkSafeMath::Add(size_t x, size_t y) {
+ SkSafeMath tmp;
+ size_t sum = tmp.add(x, y);
+ return tmp.ok() ? sum : SK_MaxSizeT;
+}
+
+size_t SkSafeMath::Mul(size_t x, size_t y) {
+ SkSafeMath tmp;
+ size_t prod = tmp.mul(x, y);
+ return tmp.ok() ? prod : SK_MaxSizeT;
+}
diff --git a/src/core/SkRegion_path.cpp b/src/core/SkRegion_path.cpp
index fec7825..e6af059 100644
--- a/src/core/SkRegion_path.cpp
+++ b/src/core/SkRegion_path.cpp
@@ -144,12 +144,7 @@
}
fStorageCount = sk_64_asS32(count);
- int64_t size = sk_64_mul(fStorageCount, sizeof(SkRegion::RunType));
- if (size < 0 || !sk_64_isS32(size)) {
- return false;
- }
-
- fStorage = (SkRegion::RunType*)sk_malloc_flags(sk_64_asS32(size), 0);
+ fStorage = (SkRegion::RunType*)sk_malloc_canfail(fStorageCount, sizeof(SkRegion::RunType));
if (nullptr == fStorage) {
return false;
}
diff --git a/src/core/SkSafeMath.h b/src/core/SkSafeMath.h
new file mode 100644
index 0000000..0bc0fbf
--- /dev/null
+++ b/src/core/SkSafeMath.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafeMath_DEFINED
+#define SkSafeMath_DEFINED
+
+#include "SkTypes.h"
+
+// SkSafeMath always checks that a series of operations do not overflow.
+// This must be correct for all platforms, because this is a check for safety at runtime.
+
+class SkSafeMath {
+public:
+ SkSafeMath() = default;
+
+ bool ok() const { return fOK; }
+ explicit operator bool() const { return fOK; }
+
+ size_t mul(size_t x, size_t y) {
+ return sizeof(size_t) == sizeof(uint64_t) ? mul64(x, y) : mul32(x, y);
+ }
+
+ size_t add(size_t x, size_t y) {
+ size_t result = x + y;
+ fOK &= result >= x;
+ return result;
+ }
+
+ /**
+ * Return a + b, unless this result is an overflow/underflow. In those cases, fOK will
+ * be set to false, and it is undefined what this returns.
+ */
+ int addInt(int a, int b) {
+ if (b < 0 && a < std::numeric_limits<int>::min() - b) {
+ fOK = false;
+ return a;
+ } else if (b > 0 && a > std::numeric_limits<int>::max() - b) {
+ fOK = false;
+ return a;
+ }
+ return a + b;
+ }
+
+ size_t alignUp(size_t x, size_t alignment) {
+ SkASSERT(alignment && !(alignment & (alignment - 1)));
+ return add(x, alignment - 1) & ~(alignment - 1);
+ }
+
+ template <typename T> T castTo(size_t value) {
+ if (!SkTFitsIn<T>(value)) {
+ fOK = false;
+ }
+ return static_cast<T>(value);
+ }
+
+ // These saturate to their results
+ static size_t Add(size_t x, size_t y);
+ static size_t Mul(size_t x, size_t y);
+ static size_t Align4(size_t x) {
+ SkSafeMath safe;
+ return safe.alignUp(x, 4);
+ }
+
+private:
+ uint32_t mul32(uint32_t x, uint32_t y) {
+ uint64_t bx = x;
+ uint64_t by = y;
+ uint64_t result = bx * by;
+ fOK &= result >> 32 == 0;
+ return result;
+ }
+
+ uint64_t mul64(uint64_t x, uint64_t y) {
+ if (x <= std::numeric_limits<uint64_t>::max() >> 32
+ && y <= std::numeric_limits<uint64_t>::max() >> 32) {
+ return x * y;
+ } else {
+ auto hi = [](uint64_t x) { return x >> 32; };
+ auto lo = [](uint64_t x) { return x & 0xFFFFFFFF; };
+
+ uint64_t lx_ly = lo(x) * lo(y);
+ uint64_t hx_ly = hi(x) * lo(y);
+ uint64_t lx_hy = lo(x) * hi(y);
+ uint64_t hx_hy = hi(x) * hi(y);
+ uint64_t result = 0;
+ result = this->add(lx_ly, (hx_ly << 32));
+ result = this->add(result, (lx_hy << 32));
+ fOK &= (hx_hy + (hx_ly >> 32) + (lx_hy >> 32)) == 0;
+
+ #if defined(SK_DEBUG) && defined(__clang__) && defined(__x86_64__)
+ auto double_check = (unsigned __int128)x * y;
+ SkASSERT(result == (double_check & 0xFFFFFFFFFFFFFFFF));
+ SkASSERT(!fOK || (double_check >> 64 == 0));
+ #endif
+
+ return result;
+ }
+ }
+ bool fOK = true;
+};
+
+#endif // SkSafeMath_DEFINED
diff --git a/src/gpu/GrBuffer.cpp b/src/gpu/GrBuffer.cpp
index c92b296..cd7862a 100644
--- a/src/gpu/GrBuffer.cpp
+++ b/src/gpu/GrBuffer.cpp
@@ -16,7 +16,7 @@
if (gpu->caps()->mustClearUploadedBufferData()) {
cpuData = sk_calloc_throw(sizeInBytes);
} else {
- cpuData = sk_malloc_flags(sizeInBytes, SK_MALLOC_THROW);
+ cpuData = sk_malloc_throw(sizeInBytes);
}
if (data) {
memcpy(cpuData, data, sizeInBytes);
diff --git a/src/ports/SkMemory_malloc.cpp b/src/ports/SkMemory_malloc.cpp
index 6c21dec..8c4a917 100644
--- a/src/ports/SkMemory_malloc.cpp
+++ b/src/ports/SkMemory_malloc.cpp
@@ -55,10 +55,6 @@
#endif
}
-void* sk_malloc_throw(size_t size) {
- return sk_malloc_flags(size, SK_MALLOC_THROW);
-}
-
void* sk_realloc_throw(void* addr, size_t size) {
return throw_on_failure(size, realloc(addr, size));
}
@@ -70,7 +66,16 @@
}
void* sk_malloc_flags(size_t size, unsigned flags) {
- void* p = malloc(size);
+ void* p;
+#ifdef SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER
+ p = malloc(size);
+#else
+ if (flags & SK_MALLOC_ZERO_INITIALIZE) {
+ p = calloc(size, 1);
+ } else {
+ p = malloc(size);
+ }
+#endif
if (flags & SK_MALLOC_THROW) {
return throw_on_failure(size, p);
} else {
@@ -78,10 +83,15 @@
}
}
+#ifdef SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER
+void* sk_malloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW);
+}
void* sk_calloc(size_t size) {
return calloc(size, 1);
}
-
void* sk_calloc_throw(size_t size) {
return throw_on_failure(size, sk_calloc(size));
}
+#endif
+
diff --git a/src/ports/SkMemory_mozalloc.cpp b/src/ports/SkMemory_mozalloc.cpp
index bf5971b..e13b946 100644
--- a/src/ports/SkMemory_mozalloc.cpp
+++ b/src/ports/SkMemory_mozalloc.cpp
@@ -22,20 +22,26 @@
mozalloc_handle_oom(0);
}
-void* sk_malloc_throw(size_t size) {
- return sk_malloc_flags(size, SK_MALLOC_THROW);
+void sk_free(void* p) {
+ free(p);
}
void* sk_realloc_throw(void* addr, size_t size) {
return moz_xrealloc(addr, size);
}
-void sk_free(void* p) {
- free(p);
+void* sk_malloc_flags(size_t size, unsigned flags) {
+#ifndef SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER
+ if (flags & SK_MALLOC_ZERO_INITIALIZE) {
+ return (flags & SK_MALLOC_THROW) ? moz_xcalloc(size, 1) : calloc(size, 1);
+ }
+#endif
+ return (flags & SK_MALLOC_THROW) ? moz_xmalloc(size) : malloc(size);
}
-void* sk_malloc_flags(size_t size, unsigned flags) {
- return (flags & SK_MALLOC_THROW) ? moz_xmalloc(size) : malloc(size);
+#ifdef SK_SUPPORT_LEGACY_MALLOC_PORTING_LAYER
+void* sk_malloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW);
}
void* sk_calloc(size_t size) {
@@ -45,3 +51,5 @@
void* sk_calloc_throw(size_t size) {
return moz_xcalloc(size, 1);
}
+#endif
+