Handle allocation failure in the flush-time preFlush callback
In general, we drop the entire flush when we detect an allocation
failure. This CL makes an allocation failure in the flush-time preFlush
call also drop the entire flush.
Bug: 1320964
Change-Id: Idd2e24fbf5d2083c4fec8de7ccd028897a239b33
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/536837
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/include/gpu/GrContextOptions.h b/include/gpu/GrContextOptions.h
index d32bb01..ebf80e5 100644
--- a/include/gpu/GrContextOptions.h
+++ b/include/gpu/GrContextOptions.h
@@ -279,6 +279,12 @@
*/
/**
+ * Testing-only mode to exercise allocation failures in the flush-time callback objects.
+ * For now it only simulates allocation failure during the preFlush callback.
+ */
+ bool fFailFlushTimeCallbacks = false;
+
+ /**
* Prevents use of dual source blending, to test that all xfer modes work correctly without it.
*/
bool fSuppressDualSourceBlending = false;
diff --git a/src/gpu/ganesh/GrDrawingManager.cpp b/src/gpu/ganesh/GrDrawingManager.cpp
index 6769120..d59f08a 100644
--- a/src/gpu/ganesh/GrDrawingManager.cpp
+++ b/src/gpu/ganesh/GrDrawingManager.cpp
@@ -165,6 +165,7 @@
GrOnFlushResourceProvider onFlushProvider(this);
// Prepare any onFlush op lists (e.g. atlases).
+ bool preFlushSuccessful = true;
if (!fOnFlushCBObjects.empty()) {
fFlushingRenderTaskIDs.reserve_back(fDAG.count());
for (const auto& task : fDAG) {
@@ -174,8 +175,10 @@
}
for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
- onFlushCBObject->preFlush(&onFlushProvider, SkMakeSpan(fFlushingRenderTaskIDs));
+ preFlushSuccessful &= onFlushCBObject->preFlush(&onFlushProvider,
+ SkMakeSpan(fFlushingRenderTaskIDs));
}
+
for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
onFlushRenderTask->makeClosed(fContext);
#ifdef SK_DEBUG
@@ -199,54 +202,59 @@
}
}
- bool usingReorderedDAG = false;
- GrResourceAllocator resourceAllocator(dContext);
- if (fReduceOpsTaskSplitting) {
- usingReorderedDAG = this->reorderTasks(&resourceAllocator);
- if (!usingReorderedDAG) {
- resourceAllocator.reset();
+ bool cachePurgeNeeded = false;
+
+ if (preFlushSuccessful) {
+ bool usingReorderedDAG = false;
+ GrResourceAllocator resourceAllocator(dContext);
+ if (fReduceOpsTaskSplitting) {
+ usingReorderedDAG = this->reorderTasks(&resourceAllocator);
+ if (!usingReorderedDAG) {
+ resourceAllocator.reset();
+ }
}
- }
#if 0
- // Enable this to print out verbose GrOp information
- SkDEBUGCODE(SkDebugf("onFlush renderTasks (%d):\n", fOnFlushRenderTasks.count()));
- for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
- SkDEBUGCODE(onFlushRenderTask->dump(/* printDependencies */ true);)
- }
- SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.count()));
- for (const auto& task : fDAG) {
- SkDEBUGCODE(task->dump(/* printDependencies */ true);)
- }
+ // Enable this to print out verbose GrOp information
+ SkDEBUGCODE(SkDebugf("onFlush renderTasks (%d):\n", fOnFlushRenderTasks.count()));
+ for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
+ SkDEBUGCODE(onFlushRenderTask->dump(/* printDependencies */ true);)
+ }
+ SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.count()));
+ for (const auto& task : fDAG) {
+ SkDEBUGCODE(task->dump(/* printDependencies */ true);)
+ }
#endif
- if (!resourceAllocator.failedInstantiation()) {
- if (!usingReorderedDAG) {
- for (const auto& task : fDAG) {
- SkASSERT(task);
- task->gatherProxyIntervals(&resourceAllocator);
+ if (!resourceAllocator.failedInstantiation()) {
+ if (!usingReorderedDAG) {
+ for (const auto& task : fDAG) {
+ SkASSERT(task);
+ task->gatherProxyIntervals(&resourceAllocator);
+ }
+ resourceAllocator.planAssignment();
}
- resourceAllocator.planAssignment();
+ resourceAllocator.assign();
}
- resourceAllocator.assign();
+
+ cachePurgeNeeded = !resourceAllocator.failedInstantiation() &&
+ this->executeRenderTasks(&flushState);
}
- bool flushed = !resourceAllocator.failedInstantiation() &&
- this->executeRenderTasks(&flushState);
this->removeRenderTasks();
gpu->executeFlushInfo(proxies, access, info, newState);
// Give the cache a chance to purge resources that become purgeable due to flushing.
- if (flushed) {
+ if (cachePurgeNeeded) {
resourceCache->purgeAsNeeded();
- flushed = false;
+ cachePurgeNeeded = false;
}
for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(),
SkMakeSpan(fFlushingRenderTaskIDs));
- flushed = true;
+ cachePurgeNeeded = true;
}
- if (flushed) {
+ if (cachePurgeNeeded) {
resourceCache->purgeAsNeeded();
}
fFlushingRenderTaskIDs.reset();
diff --git a/src/gpu/ganesh/GrDynamicAtlas.cpp b/src/gpu/ganesh/GrDynamicAtlas.cpp
index fb6c480..1705b31 100644
--- a/src/gpu/ganesh/GrDynamicAtlas.cpp
+++ b/src/gpu/ganesh/GrDynamicAtlas.cpp
@@ -188,7 +188,7 @@
return true;
}
-void GrDynamicAtlas::instantiate(GrOnFlushResourceProvider* onFlushRP,
+bool GrDynamicAtlas::instantiate(GrOnFlushResourceProvider* onFlushRP,
sk_sp<GrTexture> backingTexture) {
SkASSERT(!this->isInstantiated()); // This method should only be called once.
// Caller should have cropped any paths to the destination render target instead of asking for
@@ -211,7 +211,9 @@
SkASSERT(backingRT->numSamples() == fTextureProxy->asRenderTargetProxy()->numSamples());
SkASSERT(backingRT->dimensions() == fTextureProxy->backingStoreDimensions());
#endif
+        // This works because 'fTextureProxy' is a lazy proxy and, in its
+        // LazyInstantiateAtlasCallback, it will just wrap 'fBackingTexture' if it is non-null.
fBackingTexture = std::move(backingTexture);
}
- onFlushRP->instatiateProxy(fTextureProxy.get());
+ return onFlushRP->instatiateProxy(fTextureProxy.get());
}
diff --git a/src/gpu/ganesh/GrDynamicAtlas.h b/src/gpu/ganesh/GrDynamicAtlas.h
index b1155ba..733d4c6 100644
--- a/src/gpu/ganesh/GrDynamicAtlas.h
+++ b/src/gpu/ganesh/GrDynamicAtlas.h
@@ -72,7 +72,8 @@
// 'backingTexture', if provided, is a renderable texture with which to instantiate our proxy.
// If null then we will create a texture using the resource provider. The purpose of this param
// is to provide a guaranteed way to recycle textures from previous atlases.
- void instantiate(GrOnFlushResourceProvider*, sk_sp<GrTexture> backingTexture = nullptr);
+ bool SK_WARN_UNUSED_RESULT instantiate(GrOnFlushResourceProvider*,
+ sk_sp<GrTexture> backingTexture = nullptr);
private:
class Node;
diff --git a/src/gpu/ganesh/GrOnFlushResourceProvider.cpp b/src/gpu/ganesh/GrOnFlushResourceProvider.cpp
index d431747..afbce3e 100644
--- a/src/gpu/ganesh/GrOnFlushResourceProvider.cpp
+++ b/src/gpu/ganesh/GrOnFlushResourceProvider.cpp
@@ -39,3 +39,9 @@
const GrCaps* GrOnFlushResourceProvider::caps() const {
return fDrawingMgr->getContext()->priv().caps();
}
+
+#if GR_TEST_UTILS
+bool GrOnFlushResourceProvider::failFlushTimeCallbacks() const {
+ return fDrawingMgr->getContext()->priv().options().fFailFlushTimeCallbacks;
+}
+#endif
diff --git a/src/gpu/ganesh/GrOnFlushResourceProvider.h b/src/gpu/ganesh/GrOnFlushResourceProvider.h
index ce6f6cb..e485b8a 100644
--- a/src/gpu/ganesh/GrOnFlushResourceProvider.h
+++ b/src/gpu/ganesh/GrOnFlushResourceProvider.h
@@ -28,8 +28,9 @@
* The preFlush callback allows subsystems (e.g., text, path renderers) to create atlases
* for a specific flush. All the GrRenderTask IDs required for the flush are passed into the
* callback.
+     * Returns true on success; false on memory allocation failure.
*/
- virtual void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> renderTaskIDs) = 0;
+ virtual bool preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> renderTaskIDs) = 0;
/**
* Called once flushing is complete and all renderTasks indicated by preFlush have been executed
@@ -54,10 +55,14 @@
public:
explicit GrOnFlushResourceProvider(GrDrawingManager* drawingMgr) : fDrawingMgr(drawingMgr) {}
- bool instatiateProxy(GrSurfaceProxy*);
+ bool SK_WARN_UNUSED_RESULT instatiateProxy(GrSurfaceProxy*);
const GrCaps* caps() const;
+#if GR_TEST_UTILS
+ bool failFlushTimeCallbacks() const;
+#endif
+
private:
GrOnFlushResourceProvider(const GrOnFlushResourceProvider&) = delete;
GrOnFlushResourceProvider& operator=(const GrOnFlushResourceProvider&) = delete;
diff --git a/src/gpu/ganesh/ops/AtlasPathRenderer.cpp b/src/gpu/ganesh/ops/AtlasPathRenderer.cpp
index 86d4846..ff4775f 100644
--- a/src/gpu/ganesh/ops/AtlasPathRenderer.cpp
+++ b/src/gpu/ganesh/ops/AtlasPathRenderer.cpp
@@ -387,39 +387,52 @@
atlasMatrix, devIBounds));
}
-void AtlasPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
+bool AtlasPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
SkSpan<const uint32_t> /* taskIDs */) {
if (fAtlasRenderTasks.empty()) {
SkASSERT(fAtlasPathCache.count() == 0);
- return;
+ return true;
}
// Verify the atlases can all share the same texture.
SkDEBUGCODE(validate_atlas_dependencies(fAtlasRenderTasks);)
- // Instantiate the first atlas.
- fAtlasRenderTasks[0]->instantiate(onFlushRP);
+ bool successful;
- // Instantiate the remaining atlases.
- GrTexture* firstAtlasTexture = fAtlasRenderTasks[0]->atlasProxy()->peekTexture();
- SkASSERT(firstAtlasTexture);
- for (int i = 1; i < fAtlasRenderTasks.count(); ++i) {
- auto atlasTask = fAtlasRenderTasks[i].get();
- if (atlasTask->atlasProxy()->backingStoreDimensions() == firstAtlasTexture->dimensions()) {
- atlasTask->instantiate(onFlushRP, sk_ref_sp(firstAtlasTexture));
- } else {
- // The atlases are expected to all be full size except possibly the final one.
- SkASSERT(i == fAtlasRenderTasks.count() - 1);
- SkASSERT(atlasTask->atlasProxy()->backingStoreDimensions().area() <
- firstAtlasTexture->dimensions().area());
- // TODO: Recycle the larger atlas texture anyway?
- atlasTask->instantiate(onFlushRP);
+#if GR_TEST_UTILS
+ if (onFlushRP->failFlushTimeCallbacks()) {
+ successful = false;
+ } else
+#endif
+ {
+ // TODO: it seems like this path renderer's backing-texture reuse could be greatly
+ // improved. Please see skbug.com/13298.
+
+ // Instantiate the first atlas.
+ successful = fAtlasRenderTasks[0]->instantiate(onFlushRP);
+
+ // Instantiate the remaining atlases.
+ GrTexture* firstAtlas = fAtlasRenderTasks[0]->atlasProxy()->peekTexture();
+ SkASSERT(firstAtlas);
+ for (int i = 1; successful && i < fAtlasRenderTasks.count(); ++i) {
+ auto atlasTask = fAtlasRenderTasks[i].get();
+ if (atlasTask->atlasProxy()->backingStoreDimensions() == firstAtlas->dimensions()) {
+ successful &= atlasTask->instantiate(onFlushRP, sk_ref_sp(firstAtlas));
+ } else {
+ // The atlases are expected to all be full size except possibly the final one.
+ SkASSERT(i == fAtlasRenderTasks.count() - 1);
+ SkASSERT(atlasTask->atlasProxy()->backingStoreDimensions().area() <
+ firstAtlas->dimensions().area());
+ // TODO: Recycle the larger atlas texture anyway?
+ successful &= atlasTask->instantiate(onFlushRP);
+ }
}
}
// Reset all atlas data.
fAtlasRenderTasks.reset();
fAtlasPathCache.reset();
+ return successful;
}
} // namespace skgpu::v1
diff --git a/src/gpu/ganesh/ops/AtlasPathRenderer.h b/src/gpu/ganesh/ops/AtlasPathRenderer.h
index 2de4123..c098d2a 100644
--- a/src/gpu/ganesh/ops/AtlasPathRenderer.h
+++ b/src/gpu/ganesh/ops/AtlasPathRenderer.h
@@ -87,7 +87,7 @@
// Instantiates texture(s) for all atlases we've created since the last flush. Atlases that are
// the same size will be instantiated with the same backing texture.
- void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
+ bool preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
float fAtlasMaxSize = 0;
float fAtlasMaxPathWidth = 0;
diff --git a/src/gpu/ganesh/ops/AtlasRenderTask.h b/src/gpu/ganesh/ops/AtlasRenderTask.h
index 13e2d5a..6d237d4 100644
--- a/src/gpu/ganesh/ops/AtlasRenderTask.h
+++ b/src/gpu/ganesh/ops/AtlasRenderTask.h
@@ -41,10 +41,10 @@
// Must be called at flush time. The texture proxy is instantiated with 'backingTexture', if
// provided. See GrDynamicAtlas.
- void instantiate(GrOnFlushResourceProvider* onFlushRP,
+ bool SK_WARN_UNUSED_RESULT instantiate(GrOnFlushResourceProvider* onFlushRP,
sk_sp<GrTexture> backingTexture = nullptr) {
SkASSERT(this->isClosed());
- fDynamicAtlas->instantiate(onFlushRP, std::move(backingTexture));
+ return fDynamicAtlas->instantiate(onFlushRP, std::move(backingTexture));
}
private:
diff --git a/src/gpu/ganesh/ops/SmallPathAtlasMgr.h b/src/gpu/ganesh/ops/SmallPathAtlasMgr.h
index 65232f3..37a7a68 100644
--- a/src/gpu/ganesh/ops/SmallPathAtlasMgr.h
+++ b/src/gpu/ganesh/ops/SmallPathAtlasMgr.h
@@ -50,11 +50,18 @@
void setUseToken(SmallPathShapeData*, GrDeferredUploadToken);
// GrOnFlushCallbackObject overrides
- void preFlush(GrOnFlushResourceProvider* onFlushRP,
+ bool preFlush(GrOnFlushResourceProvider* onFlushRP,
SkSpan<const uint32_t> /* taskIDs */) override {
+#if GR_TEST_UTILS
+ if (onFlushRP->failFlushTimeCallbacks()) {
+ return false;
+ }
+#endif
+
if (fAtlas) {
fAtlas->instantiate(onFlushRP);
}
+ return true;
}
void postFlush(GrDeferredUploadToken startTokenForNextFlush,
diff --git a/src/gpu/ganesh/text/GrAtlasManager.h b/src/gpu/ganesh/text/GrAtlasManager.h
index 68ac40f..96112b6 100644
--- a/src/gpu/ganesh/text/GrAtlasManager.h
+++ b/src/gpu/ganesh/text/GrAtlasManager.h
@@ -87,12 +87,19 @@
// GrOnFlushCallbackObject overrides
- void preFlush(GrOnFlushResourceProvider* onFlushRP, SkSpan<const uint32_t>) override {
+ bool preFlush(GrOnFlushResourceProvider* onFlushRP, SkSpan<const uint32_t>) override {
+#if GR_TEST_UTILS
+ if (onFlushRP->failFlushTimeCallbacks()) {
+ return false;
+ }
+#endif
+
for (int i = 0; i < skgpu::kMaskFormatCount; ++i) {
if (fAtlases[i]) {
fAtlases[i]->instantiate(onFlushRP);
}
}
+ return true;
}
void postFlush(GrDeferredUploadToken startTokenForNextFlush, SkSpan<const uint32_t>) override {
diff --git a/tests/LazyProxyTest.cpp b/tests/LazyProxyTest.cpp
index b4642a1..67ffff4 100644
--- a/tests/LazyProxyTest.cpp
+++ b/tests/LazyProxyTest.cpp
@@ -43,9 +43,16 @@
REPORTER_ASSERT(fReporter, fHasClipTexture);
}
- void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t>) override {
+ bool preFlush(GrOnFlushResourceProvider* onFlushRP, SkSpan<const uint32_t>) override {
+#if GR_TEST_UTILS
+ if (onFlushRP->failFlushTimeCallbacks()) {
+ return false;
+ }
+#endif
+
REPORTER_ASSERT(fReporter, !fHasOpTexture);
REPORTER_ASSERT(fReporter, !fHasClipTexture);
+ return true;
}
void postFlush(GrDeferredUploadToken, SkSpan<const uint32_t>) override {
diff --git a/tools/flags/CommonFlagsGpu.cpp b/tools/flags/CommonFlagsGpu.cpp
index 98d0d97..3576403 100644
--- a/tools/flags/CommonFlagsGpu.cpp
+++ b/tools/flags/CommonFlagsGpu.cpp
@@ -20,6 +20,8 @@
static DEFINE_bool(cachePathMasks, true,
"Allows path mask textures to be cached in GPU configs.");
+static DEFINE_bool(failFlushTimeCallbacks, false,
+ "Causes all flush-time callbacks to fail.");
static DEFINE_bool(allPathsVolatile, false,
"Causes all GPU paths to be processed as if 'setIsVolatile' had been called.");
@@ -96,6 +98,7 @@
ctxOptions->fExecutor = gGpuExecutor.get();
ctxOptions->fAllowPathMaskCaching = FLAGS_cachePathMasks;
+ ctxOptions->fFailFlushTimeCallbacks = FLAGS_failFlushTimeCallbacks;
ctxOptions->fAllPathsVolatile = FLAGS_allPathsVolatile;
ctxOptions->fGpuPathRenderers = collect_gpu_path_renderers_from_flags();
ctxOptions->fDisableDriverCorrectnessWorkarounds = FLAGS_disableDriverCorrectnessWorkarounds;